Commit 1b44cb91 authored by Andy Grover

Merge groveronline.com:/root/bk/linux-2.5

into groveronline.com:/root/bk/linux-acpi
parents e7f00314 343da22d
@@ -920,8 +920,6 @@ L:	autofs@linux.kernel.org
 S:	Maintained
 
 KERNEL BUILD (Makefile, Rules.make, scripts/*)
-P:	Keith Owens
-M:	kaos@ocs.com.au
 P:	Michael Elizabeth Chastain
 M:	mec@shout.net
 L:	kbuild-devel@lists.sourceforge.net
@@ -1064,12 +1062,6 @@ W: http://www.acc.umu.se/~mcalinux/
 L:	linux-kernel@vger.kernel.org
 S:	Maintained
 
-MODULE SUPPORT [GENERAL], KMOD
-P:	Keith Owens
-M:	kaos@ocs.com.au
-L:	linux-kernel@vger.kernel.org
-S:	Maintained
-
 MOUSE AND MISC DEVICES [GENERAL]
 P:	Alessandro Rubini
 M:	rubini@ipvvis.unipv.it
...
@@ -2909,10 +2909,8 @@ static void DAC960_RequestFunction(RequestQueue_T *RequestQueue)
 static inline void DAC960_ProcessCompletedBuffer(BufferHeader_T *BufferHeader,
 						 boolean SuccessfulIO)
 {
-  if (SuccessfulIO)
-    set_bit(BIO_UPTODATE, &BufferHeader->bi_flags);
+  bio_endio(BufferHeader, BufferHeader->bi_size, SuccessfulIO ? 0 : -EIO);
   blk_finished_io(bio_sectors(BufferHeader));
-  BufferHeader->bi_end_io(BufferHeader);
 }
 
 static inline int DAC960_PartitionByCommand(DAC960_Command_T *Command)
...
@@ -544,9 +544,16 @@ static void process_page(unsigned long data)
 	while(return_bio) {
 		struct bio *bio = return_bio;
+		int bytes = bio->bi_size;
+
 		return_bio = bio->bi_next;
 		bio->bi_next = NULL;
-		bio->bi_end_io(bio);
+		/* should use bio_endio(); however BIO_UPTODATE has already
+		 * been cleared, so set bio->bi_size = 0 manually to indicate
+		 * the bio is completely done
+		 */
+		bio->bi_size = 0;
+		bio->bi_end_io(bio, bytes, 0);
 	}
 }
@@ -560,8 +567,6 @@ static int mm_make_request(request_queue_t *q, struct bio *bio)
 	struct cardinfo *card = q->queuedata;
 	PRINTK("mm_make_request %ld %d\n", bh->b_rsector, bh->b_size);
 
-	/* set uptodate now, and clear it if there are any errors */
-	set_bit(BIO_UPTODATE, &bio->bi_flags);
 	bio->bi_phys_segments = bio->bi_idx;	/* count of completed segments */
 	spin_lock_bh(&card->lock);
 	*card->biotail = bio;
...
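The umem hunks above finish bios by hand rather than through bio_endio(): the driver records the residual byte count, zeroes bi_size to mark the bio completely done, and only then calls bi_end_io, because a hook that sees a nonzero bi_size treats the call as a partial completion. A minimal userspace sketch of that caller-side pattern follows; toy_bio and toy_end_io are hypothetical stand-ins, not the kernel's types.

#include <stdio.h>

/* Hypothetical stand-in for struct bio; only the fields the completion
 * contract needs. */
struct toy_bio {
	unsigned int bi_size;			/* bytes still outstanding */
	int (*bi_end_io)(struct toy_bio *, unsigned int, int);
};

static int toy_end_io(struct toy_bio *bio, unsigned int bytes_done, int error)
{
	if (bio->bi_size)			/* nonzero: partial, not done */
		return 1;
	printf("bio done: %u bytes, error=%d\n", bytes_done, error);
	return 0;
}

int main(void)
{
	struct toy_bio bio = { .bi_size = 4096, .bi_end_io = toy_end_io };
	unsigned int bytes = bio.bi_size;	/* remember the residual count */

	bio.bi_size = 0;			/* mark "completely done" by hand */
	bio.bi_end_io(&bio, bytes, 0);		/* hook now runs to completion */
	return 0;
}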
@@ -144,7 +144,7 @@ static int linear_make_request (request_queue_t *q, struct bio *bio)
 	if (!hash->dev1) {
 		printk ("linear_make_request : hash->dev1==NULL for block %ld\n",
 			block);
-		bio_io_error(bio);
+		bio_io_error(bio, bio->bi_size);
 		return 0;
 	}
 	tmp_dev = hash->dev1;
@@ -154,7 +154,7 @@ static int linear_make_request (request_queue_t *q, struct bio *bio)
 	if (block >= (tmp_dev->size + tmp_dev->offset)
 	    || block < tmp_dev->offset) {
 		printk ("linear_make_request: Block %ld out of bounds on dev %s size %ld offset %ld\n", block, bdevname(tmp_dev->rdev->bdev), tmp_dev->size, tmp_dev->offset);
-		bio_io_error(bio);
+		bio_io_error(bio, bio->bi_size);
 		return 0;
 	}
 	bio->bi_bdev = tmp_dev->rdev->bdev;
...
@@ -144,7 +144,7 @@ static mddev_t *mddev_map[MAX_MD_DEVS];
 static int md_fail_request (request_queue_t *q, struct bio *bio)
 {
-	bio_io_error(bio);
+	bio_io_error(bio, bio->bi_size);
 	return 0;
 }
@@ -361,9 +361,13 @@ static void free_disk_sb(mdk_rdev_t * rdev)
 }
 
-static void bi_complete(struct bio *bio)
+static int bi_complete(struct bio *bio, unsigned int bytes_done, int error)
 {
+	if (bio->bi_size)
+		return 1;
+
 	complete((struct completion*)bio->bi_private);
+	return 0;
 }
 
 static int sync_page_io(struct block_device *bdev, sector_t sector, int size,
...
@@ -109,17 +109,20 @@ static void multipath_end_bh_io (struct multipath_bh *mp_bh, int uptodate)
 	struct bio *bio = mp_bh->master_bio;
 	multipath_conf_t *conf = mddev_to_conf(mp_bh->mddev);
 
-	bio_endio(bio, uptodate);
+	bio_endio(bio, bio->bi_size, uptodate ? 0 : -EIO);
 	mempool_free(mp_bh, conf->pool);
 }
 
-void multipath_end_request(struct bio *bio)
+int multipath_end_request(struct bio *bio, unsigned int bytes_done, int error)
 {
 	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
 	struct multipath_bh * mp_bh = (struct multipath_bh *)(bio->bi_private);
 	multipath_conf_t *conf = mddev_to_conf(mp_bh->mddev);
 	mdk_rdev_t *rdev = conf->multipaths[mp_bh->path].rdev;
 
+	if (bio->bi_size)
+		return 1;
+
 	if (uptodate)
 		multipath_end_bh_io(mp_bh, uptodate);
 	else {
@@ -132,7 +135,7 @@ void multipath_end_request(struct bio *bio)
 		multipath_reschedule_retry(mp_bh);
 	}
 	atomic_dec(&rdev->nr_pending);
-	return;
+	return 0;
 }
 
 /*
...
@@ -323,7 +323,7 @@ static int raid0_make_request (request_queue_t *q, struct bio *bio)
 bad_zone1:
 	printk ("raid0_make_request bug: hash->zone1==NULL for block %ld\n", block);
 outerr:
-	bio_io_error(bio);
+	bio_io_error(bio, bio->bi_size);
 	return 0;
 }
...
@@ -236,7 +236,7 @@ static void raid_end_bio_io(r1bio_t *r1_bio, int uptodate)
 {
 	struct bio *bio = r1_bio->master_bio;
 
-	bio_endio(bio, uptodate);
+	bio_endio(bio, bio->bi_size, uptodate ? 0 : -EIO);
 	free_r1bio(r1_bio);
 }
@@ -251,12 +251,15 @@ static void inline update_head_pos(int disk, r1bio_t *r1_bio)
 		r1_bio->sector + (r1_bio->master_bio->bi_size >> 9);
 }
 
-static void end_request(struct bio *bio)
+static int end_request(struct bio *bio, unsigned int bytes_done, int error)
 {
 	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
 	r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
 	int mirror;
 	conf_t *conf = mddev_to_conf(r1_bio->mddev);
 
+	if (bio->bi_size)
+		return 1;
+
 	if (r1_bio->cmd == READ || r1_bio->cmd == READA)
 		mirror = r1_bio->read_disk;
@@ -313,6 +316,7 @@ static void end_request(struct bio *bio)
 		raid_end_bio_io(r1_bio, uptodate);
 	}
 	atomic_dec(&conf->mirrors[mirror].rdev->nr_pending);
+	return 0;
 }
 
 /*
@@ -748,12 +752,15 @@ static int raid1_remove_disk(mddev_t *mddev, int number)
 #define REDIRECT_SECTOR KERN_ERR \
 "raid1: %s: redirecting sector %lu to another mirror\n"
 
-static void end_sync_read(struct bio *bio)
+static int end_sync_read(struct bio *bio, unsigned int bytes_done, int error)
 {
 	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
 	r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
 	conf_t *conf = mddev_to_conf(r1_bio->mddev);
 
+	if (bio->bi_size)
+		return 1;
+
 	if (r1_bio->read_bio != bio)
 		BUG();
 	update_head_pos(r1_bio->read_disk, r1_bio);
@@ -769,9 +776,10 @@ static void end_sync_read(struct bio *bio)
 		set_bit(R1BIO_Uptodate, &r1_bio->state);
 	atomic_dec(&conf->mirrors[r1_bio->read_disk].rdev->nr_pending);
 	reschedule_retry(r1_bio);
+	return 0;
 }
 
-static void end_sync_write(struct bio *bio)
+static int end_sync_write(struct bio *bio, unsigned int bytes_done, int error)
 {
 	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
 	r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
@@ -780,6 +788,9 @@ static void end_sync_write(struct bio *bio)
 	int i;
 	int mirror=0;
 
+	if (bio->bi_size)
+		return 1;
+
 	for (i = 0; i < conf->raid_disks; i++)
 		if (r1_bio->write_bios[i] == bio) {
 			mirror = i;
@@ -795,6 +806,7 @@ static void end_sync_write(struct bio *bio)
 		put_buf(r1_bio);
 	}
 	atomic_dec(&conf->mirrors[mirror].rdev->nr_pending);
+	return 0;
 }
 
 static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
...
@@ -321,13 +321,17 @@ static void shrink_stripes(raid5_conf_t *conf)
 	conf->slab_cache = NULL;
 }
 
-static void raid5_end_read_request (struct bio * bi)
+static int raid5_end_read_request (struct bio * bi, unsigned int bytes_done,
+				   int error)
 {
 	struct stripe_head *sh = bi->bi_private;
 	raid5_conf_t *conf = sh->raid_conf;
 	int disks = conf->raid_disks, i;
 	int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
 
+	if (bi->bi_size)
+		return 1;
+
 	for (i=0 ; i<disks; i++)
 		if (bi == &sh->dev[i].req)
 			break;
@@ -335,7 +339,7 @@ static void raid5_end_read_request (struct bio * bi)
 	PRINTK("end_read_request %lu/%d, count: %d, uptodate %d.\n", sh->sector, i, atomic_read(&sh->count), uptodate);
 	if (i == disks) {
 		BUG();
-		return;
+		return 0;
 	}
 
 	if (uptodate) {
@@ -384,9 +388,11 @@ static void raid5_end_read_request (struct bio * bi)
 	clear_bit(R5_LOCKED, &sh->dev[i].flags);
 	set_bit(STRIPE_HANDLE, &sh->state);
 	release_stripe(sh);
+	return 0;
 }
 
-static void raid5_end_write_request (struct bio *bi)
+static int raid5_end_write_request (struct bio *bi, unsigned int bytes_done,
+				    int error)
 {
 	struct stripe_head *sh = bi->bi_private;
 	raid5_conf_t *conf = sh->raid_conf;
@@ -394,6 +400,9 @@ static void raid5_end_write_request (struct bio *bi)
 	unsigned long flags;
 	int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
 
+	if (bi->bi_size)
+		return 1;
+
 	for (i=0 ; i<disks; i++)
 		if (bi == &sh->dev[i].req)
 			break;
@@ -401,7 +410,7 @@ static void raid5_end_write_request (struct bio *bi)
 	PRINTK("end_write_request %lu/%d, count %d, uptodate: %d.\n", sh->sector, i, atomic_read(&sh->count), uptodate);
 	if (i == disks) {
 		BUG();
-		return;
+		return 0;
 	}
 
 	spin_lock_irqsave(&conf->device_lock, flags);
@@ -414,6 +423,7 @@ static void raid5_end_write_request (struct bio *bi)
 	set_bit(STRIPE_HANDLE, &sh->state);
 	__release_stripe(conf, sh);
 	spin_unlock_irqrestore(&conf->device_lock, flags);
+	return 0;
 }
@@ -1135,9 +1145,12 @@ static void handle_stripe(struct stripe_head *sh)
 	spin_unlock(&sh->lock);
 
 	while ((bi=return_bi)) {
+		int bytes = bi->bi_size;
+
 		return_bi = bi->bi_next;
 		bi->bi_next = NULL;
-		bi->bi_end_io(bi);
+		bi->bi_size = 0;
+		bi->bi_end_io(bi, bytes, 0);
 	}
 	for (i=disks; i-- ;)
 		if (sh->dev[i].flags & ((1<<R5_Wantwrite)|(1<<R5_Wantread))) {
@@ -1236,7 +1249,6 @@ static int make_request (request_queue_t *q, struct bio * bi)
 	last_sector = bi->bi_sector + (bi->bi_size>>9);
 	bi->bi_next = NULL;
-	set_bit(BIO_UPTODATE, &bi->bi_flags); /* will be cleared if error detected */
 	bi->bi_phys_segments = 1;	/* over-loaded to count active stripes */
 
 	for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) {
@@ -1257,8 +1269,12 @@ static int make_request (request_queue_t *q, struct bio * bi)
 		}
 	}
 	spin_lock_irq(&conf->device_lock);
-	if (--bi->bi_phys_segments == 0)
-		bi->bi_end_io(bi);
+	if (--bi->bi_phys_segments == 0) {
+		int bytes = bi->bi_size;
+
+		bi->bi_size = 0;
+		bi->bi_end_io(bi, bytes, 0);
+	}
 	spin_unlock_irq(&conf->device_lock);
 	return 0;
 }
...
@@ -120,8 +120,9 @@ obj-$(CONFIG_BLK_DEV_SR) += sr_mod.o
 obj-$(CONFIG_CHR_DEV_SG) += sg.o
 
 scsi_mod-objs := scsi.o hosts.o scsi_ioctl.o constants.o scsicam.o \
-		scsi_proc.o scsi_error.o scsi_queue.o scsi_lib.o \
-		scsi_merge.o scsi_scan.o scsi_syms.o
+		scsi_proc.o scsi_error.o scsi_lib.o scsi_merge.o \
+		scsi_scan.o scsi_syms.o
 sd_mod-objs := sd.o
 sr_mod-objs := sr.o sr_ioctl.o sr_vendor.o
 initio-objs := ini9100u.o i91uscsi.o
...
@@ -593,6 +593,90 @@ inline void __scsi_release_command(Scsi_Cmnd * SCpnt)
 	wake_up(&SDpnt->scpnt_wait);
 }
 
+/*
+ * Function:    scsi_mlqueue_insert()
+ *
+ * Purpose:     Insert a command in the midlevel queue.
+ *
+ * Arguments:   cmd    - command that we are adding to the queue.
+ *              reason - why we are inserting the command into the queue.
+ *
+ * Lock status: Assumed that lock is not held upon entry.
+ *
+ * Returns:     Zero.
+ *
+ * Notes:       We do this for one of two cases.  Either the host is busy
+ *              and it cannot accept any more commands for the time being,
+ *              or the device returned QUEUE_FULL and can accept no more
+ *              commands.
+ * Notes:       This could be called either from an interrupt context or a
+ *              normal process context.
+ */
+static int scsi_mlqueue_insert(Scsi_Cmnd * cmd, int reason)
+{
+	struct Scsi_Host *host = cmd->host;
+	unsigned long flags;
+
+	SCSI_LOG_MLQUEUE(1,
+			 printk("Inserting command %p into mlqueue\n", cmd));
+
+	/*
+	 * We are inserting the command into the ml queue.  First, we
+	 * cancel the timer, so it doesn't time out.
+	 */
+	scsi_delete_timer(cmd);
+
+	/*
+	 * Next, set the appropriate busy bit for the device/host.
+	 *
+	 * If the host/device isn't busy, assume that something actually
+	 * completed, and that we should be able to queue a command now.
+	 *
+	 * Note that there is an implicit assumption that every host can
+	 * always queue at least one command.  If a host is inactive and
+	 * cannot queue any commands, I don't see how things could
+	 * possibly work anyway.
+	 */
+	if (reason == SCSI_MLQUEUE_HOST_BUSY) {
+		if (host->host_busy == 0) {
+			if (scsi_retry_command(cmd) == 0) {
+				return 0;
+			}
+		}
+		host->host_blocked = TRUE;
+	} else {
+		if (cmd->device->device_busy == 0) {
+			if (scsi_retry_command(cmd) == 0) {
+				return 0;
+			}
+		}
+		cmd->device->device_blocked = TRUE;
+	}
+
+	/*
+	 * Register the fact that we own the thing for now.
+	 */
+	cmd->state = SCSI_STATE_MLQUEUE;
+	cmd->owner = SCSI_OWNER_MIDLEVEL;
+	cmd->bh_next = NULL;
+
+	/*
+	 * Decrement the counters, since these commands are no longer
+	 * active on the host/device.
+	 */
+	spin_lock_irqsave(cmd->host->host_lock, flags);
+	cmd->host->host_busy--;
+	cmd->device->device_busy--;
+	spin_unlock_irqrestore(cmd->host->host_lock, flags);
+
+	/*
+	 * Insert this command at the head of the queue for its device.
+	 * It will go before all other commands that are already in the queue.
+	 */
+	scsi_insert_special_cmd(cmd, 1);
+	return 0;
+}
+
 /*
  * Function:    scsi_release_command
  *
...
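scsi_mlqueue_insert(), newly moved into scsi.c above, parks a rejected command on the midlevel queue: it marks the rejecting host or device as blocked and drops the corresponding busy counts so the command is no longer accounted as active. The toy userspace model below mirrors just that bookkeeping; the struct names, field names, and the SCSI_MLQUEUE_HOST_BUSY value are hypothetical stand-ins, not the kernel's definitions.

#include <stdio.h>

enum { SCSI_MLQUEUE_HOST_BUSY = 1 };	/* stand-in value, not the kernel's */

struct toy_host   { int host_busy, host_blocked; };
struct toy_device { int device_busy, device_blocked; };

/* Models the tail of scsi_mlqueue_insert(): mark the rejecting side
 * blocked so nothing more is issued to it, and account the command as
 * no longer active on the host/device. */
static void toy_mlqueue_insert(struct toy_host *host, struct toy_device *dev,
			       int reason)
{
	if (reason == SCSI_MLQUEUE_HOST_BUSY)
		host->host_blocked = 1;
	else
		dev->device_blocked = 1;

	host->host_busy--;
	dev->device_busy--;
}

int main(void)
{
	struct toy_host h = { .host_busy = 2 };
	struct toy_device d = { .device_busy = 1 };

	toy_mlqueue_insert(&h, &d, SCSI_MLQUEUE_HOST_BUSY);
	printf("host_busy=%d host_blocked=%d device_busy=%d device_blocked=%d\n",
	       h.host_busy, h.host_blocked, d.device_busy, d.device_blocked);
	return 0;
}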
@@ -460,11 +460,6 @@ int scsi_free(void *, unsigned int);
 extern void scsi_initialize_merge_fn(Scsi_Device *SDpnt);
 extern int scsi_init_io(Scsi_Cmnd *SCpnt);
 
-/*
- * Prototypes for functions in scsi_queue.c
- */
-extern int scsi_mlqueue_insert(Scsi_Cmnd * cmd, int reason);
-
 /*
  * Prototypes for functions in scsi_lib.c
  */
...
-/*
- *  scsi_queue.c Copyright (C) 1997 Eric Youngdale
- *
- *  generic mid-level SCSI queueing.
- *
- *  The point of this is that we need to track when hosts are unable to
- *  accept a command because they are busy.  In addition, we track devices
- *  that cannot accept a command because of a QUEUE_FULL condition.  In both
- *  of these cases, we enter the command in the queue.  At some later point,
- *  we attempt to remove commands from the queue and retry them.
- */
-
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/timer.h>
-#include <linux/string.h>
-#include <linux/slab.h>
-#include <linux/ioport.h>
-#include <linux/kernel.h>
-#include <linux/stat.h>
-#include <linux/blk.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/smp_lock.h>
-
-#define __KERNEL_SYSCALLS__
-
-#include <linux/unistd.h>
-
-#include <asm/system.h>
-#include <asm/irq.h>
-#include <asm/dma.h>
-
-#include "scsi.h"
-#include "hosts.h"
-
-/*
- * TODO:
- *      1) Prevent multiple traversals of list to look for commands to
- *         queue.
- *      2) Protect against multiple insertions of list at the same time.
- * DONE:
- *      1) Set state of scsi command to a new state value for ml queue.
- *      2) Insert into queue when host rejects command.
- *      3) Make sure status code is properly passed from low-level queue func
- *         so that internal_cmnd properly returns the right value.
- *      4) Insert into queue when QUEUE_FULL.
- *      5) Cull queue in bottom half handler.
- *      6) Check usage count prior to queue insertion.  Requeue if usage
- *         count is 0.
- *      7) Don't send down any more commands if the host/device is busy.
- */
-
-static const char RCSid[] = "$Header: /mnt/ide/home/eric/CVSROOT/linux/drivers/scsi/scsi_queue.c,v 1.1 1997/10/21 11:16:38 eric Exp $";
-
-/*
- * Function:    scsi_mlqueue_insert()
- *
- * Purpose:     Insert a command in the midlevel queue.
- *
- * Arguments:   cmd    - command that we are adding to queue.
- *              reason - why we are inserting command to queue.
- *
- * Lock status: Assumed that lock is not held upon entry.
- *
- * Returns:     Nothing.
- *
- * Notes:       We do this for one of two cases.  Either the host is busy
- *              and it cannot accept any more commands for the time being,
- *              or the device returned QUEUE_FULL and can accept no more
- *              commands.
- * Notes:       This could be called either from an interrupt context or a
- *              normal process context.
- */
-int scsi_mlqueue_insert(Scsi_Cmnd * cmd, int reason)
-{
-	struct Scsi_Host *host;
-	unsigned long flags;
-
-	SCSI_LOG_MLQUEUE(1, printk("Inserting command %p into mlqueue\n", cmd));
-
-	/*
-	 * We are inserting the command into the ml queue.  First, we
-	 * cancel the timer, so it doesn't time out.
-	 */
-	scsi_delete_timer(cmd);
-
-	host = cmd->host;
-
-	/*
-	 * Next, set the appropriate busy bit for the device/host.
-	 */
-	if (reason == SCSI_MLQUEUE_HOST_BUSY) {
-		/*
-		 * Protect against race conditions.  If the host isn't busy,
-		 * assume that something actually completed, and that we should
-		 * be able to queue a command now.  Note that there is an implicit
-		 * assumption that every host can always queue at least one command.
-		 * If a host is inactive and cannot queue any commands, I don't see
-		 * how things could possibly work anyways.
-		 */
-		if (host->host_busy == 0) {
-			if (scsi_retry_command(cmd) == 0) {
-				return 0;
-			}
-		}
-		host->host_blocked = TRUE;
-	} else {
-		/*
-		 * Protect against race conditions.  If the device isn't busy,
-		 * assume that something actually completed, and that we should
-		 * be able to queue a command now.  Note that there is an implicit
-		 * assumption that every host can always queue at least one command.
-		 * If a host is inactive and cannot queue any commands, I don't see
-		 * how things could possibly work anyways.
-		 */
-		if (cmd->device->device_busy == 0) {
-			if (scsi_retry_command(cmd) == 0) {
-				return 0;
-			}
-		}
-		cmd->device->device_blocked = TRUE;
-	}
-
-	/*
-	 * Register the fact that we own the thing for now.
-	 */
-	cmd->state = SCSI_STATE_MLQUEUE;
-	cmd->owner = SCSI_OWNER_MIDLEVEL;
-	cmd->bh_next = NULL;
-
-	/*
-	 * Decrement the counters, since these commands are no longer
-	 * active on the host/device.
-	 */
-	spin_lock_irqsave(cmd->host->host_lock, flags);
-	cmd->host->host_busy--;
-	cmd->device->device_busy--;
-	spin_unlock_irqrestore(cmd->host->host_lock, flags);
-
-	/*
-	 * Insert this command at the head of the queue for it's device.
-	 * It will go before all other commands that are already in the queue.
-	 */
-	scsi_insert_special_cmd(cmd, 1);
-	return 0;
-}
@@ -110,7 +110,7 @@ void bio_destructor(struct bio *bio)
 inline void bio_init(struct bio *bio)
 {
 	bio->bi_next = NULL;
-	bio->bi_flags = 0;
+	bio->bi_flags = 1 << BIO_UPTODATE;
 	bio->bi_rw = 0;
 	bio->bi_vcnt = 0;
 	bio->bi_idx = 0;
@@ -523,13 +523,31 @@ void ll_rw_kio(int rw, struct kiobuf *kio, struct block_device *bdev, sector_t s
 	end_kio_request(kio, !err);
 }
 
+/**
+ * bio_endio - end I/O on a bio
+ * @bio:	bio
+ * @bytes_done:	number of bytes completed
+ * @error:	error, if any
+ *
+ * Description:
+ *   bio_endio() will end I/O on @bytes_done number of bytes. This may be
+ *   just a part of the bio, or it may be the whole bio. bio_endio() is the
+ *   preferred way to end I/O on a bio; it takes care of decrementing
+ *   bi_size and clearing BIO_UPTODATE on error. @error is 0 on success,
+ *   and one of the established -Exxxx (-EIO, for instance) error values
+ *   in case something went wrong.
+ **/
 int bio_endio(struct bio *bio, unsigned int bytes_done, int error)
 {
-	if (!error)
-		set_bit(BIO_UPTODATE, &bio->bi_flags);
-	else
+	if (error)
 		clear_bit(BIO_UPTODATE, &bio->bi_flags);
 
+	if (unlikely(bytes_done > bio->bi_size)) {
+		printk("%s: want %u bytes done, only %u left\n", __FUNCTION__,
+		       bytes_done, bio->bi_size);
+		bytes_done = bio->bi_size;
+	}
+
 	bio->bi_size -= bytes_done;
 	return bio->bi_end_io(bio, bytes_done, error);
 }
...
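The fs/bio.c hunk is the heart of this merge: bio_endio() now subtracts @bytes_done from bi_size, and a bi_end_io hook returns 1 for partial completions and 0 once the whole bio is done. The standalone sketch below models both halves of that handshake; the toy_* names are hypothetical stand-ins for the kernel types, not the kernel API.

#include <stdio.h>

struct toy_bio {
	unsigned int bi_size;	/* bytes still outstanding */
	int (*bi_end_io)(struct toy_bio *, unsigned int, int);
};

/* New-style hook: ignore partial completions, act only on the final one. */
static int toy_end_io(struct toy_bio *bio, unsigned int bytes_done, int error)
{
	if (bio->bi_size)
		return 1;
	printf("complete: final chunk %u bytes, error=%d\n", bytes_done, error);
	return 0;
}

/* Mirrors the shape of the new bio_endio(): clamp bytes_done to what is
 * left, decrement bi_size, then invoke the hook. */
static int toy_endio(struct toy_bio *bio, unsigned int bytes_done, int error)
{
	if (bytes_done > bio->bi_size)
		bytes_done = bio->bi_size;
	bio->bi_size -= bytes_done;
	return bio->bi_end_io(bio, bytes_done, error);
}

int main(void)
{
	struct toy_bio bio = { .bi_size = 4096, .bi_end_io = toy_end_io };

	toy_endio(&bio, 1024, 0);	/* partial: hook returns 1 */
	toy_endio(&bio, 3072, 0);	/* final: hook completes the I/O */
	return 0;
}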