Commit f741fc08 authored by Linus Torvalds

Merge http://nfsclient.bkbits.net/linux-2.5

into penguin.transmeta.com:/home/penguin/torvalds/repositories/kernel/linux
parents 49013e91 c418d3f8
@@ -1098,6 +1098,7 @@ static inline int idedisk_supports_host_protected_area(ide_drive_t *drive)
  * in above order (i.e., if value of higher priority is available,
  * reset will be ignored).
  */
+#define IDE_STROKE_LIMIT (32000*1024*2)
 static void init_idedisk_capacity (ide_drive_t *drive)
 {
 	struct hd_driveid *id = drive->id;
@@ -1118,7 +1119,7 @@ static void init_idedisk_capacity (ide_drive_t *drive)
 		drive->cyl = (unsigned int) capacity_2 / (drive->head * drive->sect);
 		drive->select.b.lba = 1;
 		set_max_ext = idedisk_read_native_max_address_ext(drive);
-		if (set_max_ext > capacity_2) {
+		if (set_max_ext > capacity_2 && capacity_2 > IDE_STROKE_LIMIT) {
 #ifdef CONFIG_IDEDISK_STROKE
 			set_max_ext = idedisk_read_native_max_address_ext(drive);
 			set_max_ext = idedisk_set_max_address_ext(drive, set_max_ext);
@@ -1145,7 +1146,7 @@ static void init_idedisk_capacity (ide_drive_t *drive)
 		drive->select.b.lba = 1;
 	}
-	if (set_max > capacity) {
+	if (set_max > capacity && capacity > IDE_STROKE_LIMIT) {
 #ifdef CONFIG_IDEDISK_STROKE
 		set_max = idedisk_read_native_max_address(drive);
 		set_max = idedisk_set_max_address(drive, set_max);
...
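The new IDE_STROKE_LIMIT is a count of 512-byte sectors: 32000*1024*2 = 65,536,000 sectors, roughly 31 GiB, so the native-max handling above is now only attempted on drives that already report more than about 32 GB. A minimal sketch of the gating logic; the helper name and parameters below are illustrative and not part of the driver:

/* Sketch only: how a ~32 GB floor can gate host-protected-area handling.
 * 32000 * 1024 * 2 is a number of 512-byte sectors (about 31 GiB). */
#define IDE_STROKE_LIMIT	(32000*1024*2)

static void maybe_restore_native_capacity(unsigned long long reported,
					  unsigned long long native_max)
{
	/* Only attempt a SET MAX ADDRESS style unlock when the native
	 * limit exceeds the reported capacity and the drive is large
	 * enough that a host protected area is plausible rather than a
	 * bogus reading from a small or confused device. */
	if (native_max > reported && reported > IDE_STROKE_LIMIT) {
		/* ... unlock the HPA / update the recorded capacity ... */
	}
}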
@@ -903,6 +903,14 @@ int ide_config_drive_speed (ide_drive_t *drive, u8 speed)
 	 * Select the drive, and issue the SETFEATURES command
 	 */
 	disable_irq_nosync(hwif->irq);
+
+	/*
+	 * FIXME: we race against the running IRQ here if
+	 * this is called from non IRQ context. If we use
+	 * disable_irq() we hang on the error path. Work
+	 * is needed.
+	 */
+
 	udelay(1);
 	SELECT_DRIVE(drive);
 	SELECT_MASK(drive, 0);
...
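For context on the FIXME above, a hedged sketch of why disable_irq() is not a drop-in replacement here; the behaviour described is the generally documented semantics of these primitives, not a quotation from this tree:

/*
 * Sketch, not driver code.  disable_irq() masks the line and then
 * waits for any handler already executing on it to finish, so calling
 * it from a path reached inside the IDE interrupt handler would wait
 * on itself.  disable_irq_nosync() masks the line and returns at once,
 * which avoids that hang but leaves the window the FIXME describes
 * when ide_config_drive_speed() is entered from process context.
 */
static void speed_change_irq_sketch(unsigned int irq)
{
	disable_irq_nosync(irq);	/* mask, do not wait for handlers */
	/* ... select the drive and issue SETFEATURES ... */
	enable_irq(irq);
}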
@@ -1670,7 +1670,7 @@ int ide_cmd_ioctl (ide_drive_t *drive, unsigned int cmd, unsigned long arg)
 #else
-	int err = 0;
+	int err = -EIO;
 	u8 args[4], *argbuf = args;
 	u8 xfer_rate = 0;
 	int argsize = 0;
...
@@ -244,14 +244,6 @@ static int ide_timing_compute(ide_drive_t *drive, short speed, struct ide_timing
 		ide_timing_merge(&p, t, t, IDE_TIMING_CYCLE | IDE_TIMING_CYC8B);
 	}
-	/*
-	 * If the drive is an ATAPI device it may need slower address setup timing,
-	 * so we stay on the safe side.
-	 */
-
-	if (drive->media != ide_disk)
-		p.setup = 120;
-
 	/*
 	 * Convert the timing to bus clock counts.
 	 */
...
@@ -37,7 +37,11 @@ static inline dev_info_t *which_dev(mddev_t *mddev, sector_t sector)
 	linear_conf_t *conf = mddev_to_conf(mddev);
 	sector_t block = sector >> 1;
-	hash = conf->hash_table + sector_div(block, conf->smallest->size);
+	/*
+	 * sector_div(a,b) returns the remainder and sets a to a/b
+	 */
+	(void)sector_div(block, conf->smallest->size);
+	hash = conf->hash_table + block;
 	if ((sector>>1) >= (hash->dev0->size + hash->dev0->offset))
 		return hash->dev1;
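A small usage sketch of the sector_div() idiom the new comment documents; the wrapper name and example values are illustrative only:

/* sector_div(a, b) divides 'a' in place and returns the remainder,
 * which is why which_dev() above keeps the quotient left in 'block'
 * as the hash index and casts the returned remainder to void. */
static unsigned long hash_index_sketch(sector_t sector, unsigned int sz)
{
	sector_t block = sector >> 1;		/* sectors -> 1K blocks */
	unsigned int rem = sector_div(block, sz);

	/* e.g. block = 1000003, sz = 64: block becomes 15625, rem is 3 */
	(void)rem;				/* remainder unused here */
	return (unsigned long)block;
}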
@@ -75,8 +79,6 @@ static int linear_run (mddev_t *mddev)
 	unsigned int curr_offset;
 	struct list_head *tmp;
-	MOD_INC_USE_COUNT;
-
 	conf = kmalloc (sizeof (*conf), GFP_KERNEL);
 	if (!conf)
 		goto out;
@@ -163,7 +165,6 @@ static int linear_run (mddev_t *mddev)
 out:
 	if (conf)
 		kfree(conf);
-	MOD_DEC_USE_COUNT;
 	return 1;
 }
@@ -174,8 +175,6 @@ static int linear_stop (mddev_t *mddev)
 	kfree(conf->hash_table);
 	kfree(conf);
-	MOD_DEC_USE_COUNT;
-
 	return 0;
 }
@@ -189,7 +188,7 @@ static int linear_make_request (request_queue_t *q, struct bio *bio)
 	block = bio->bi_sector >> 1;
 	if (unlikely(!tmp_dev)) {
-		printk ("linear_make_request : hash->dev1==NULL for block %llu\n",
+		printk("linear_make_request: hash->dev1==NULL for block %llu\n",
 			(unsigned long long)block);
 		bio_io_error(bio, bio->bi_size);
 		return 0;
@@ -199,7 +198,7 @@ static int linear_make_request (request_queue_t *q, struct bio *bio)
 	    || block < tmp_dev->offset)) {
 		char b[BDEVNAME_SIZE];
-		printk ("linear_make_request: Block %llu out of bounds on "
+		printk("linear_make_request: Block %llu out of bounds on "
 			"dev %s size %ld offset %ld\n",
 			(unsigned long long)block,
 			bdevname(tmp_dev->rdev->bdev, b),
@@ -242,6 +241,7 @@ static void linear_status (struct seq_file *seq, mddev_t *mddev)
 static mdk_personality_t linear_personality=
 {
 	.name		= "linear",
+	.owner		= THIS_MODULE,
 	.make_request	= linear_make_request,
 	.run		= linear_run,
 	.stop		= linear_stop,
...
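With .owner set, the md core (see the md.c hunks below) can pin the personality's module itself, which is why the MOD_INC_USE_COUNT/MOD_DEC_USE_COUNT calls disappear from linear_run() and linear_stop(). A hedged sketch of the load/unload side; the LINEAR personality number and the init/exit function names are assumed for illustration and are not shown in this patch:

/* Sketch: the personality registers itself on module load and drops
 * out of the table on unload; pers_lock plus try_module_get() in the
 * md core keep arrays from latching onto the personality once unload
 * has begun. */
static int __init linear_init(void)
{
	return register_md_personality(LINEAR, &linear_personality);
}

static void __exit linear_exit(void)
{
	unregister_md_personality(LINEAR);
}

module_init(linear_init);
module_exit(linear_exit);
MODULE_LICENSE("GPL");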
@@ -64,6 +64,7 @@ static void autostart_arrays (void);
 #endif
 static mdk_personality_t *pers[MAX_PERSONALITY];
+static spinlock_t pers_lock = SPIN_LOCK_UNLOCKED;
 /*
  * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
@@ -302,22 +303,6 @@ static sector_t zoned_raid_size(mddev_t *mddev)
 	return 0;
 }
-
-#define BAD_MAGIC KERN_ERR \
-"md: invalid raid superblock magic on %s\n"
-
-#define BAD_MINOR KERN_ERR \
-"md: %s: invalid raid minor (%x)\n"
-
-#define OUT_OF_MEM KERN_ALERT \
-"md: out of memory.\n"
-
-#define NO_SB KERN_ERR \
-"md: disabled device %s, could not read superblock.\n"
-
-#define BAD_CSUM KERN_WARNING \
-"md: invalid superblock checksum on %s\n"
 static int alloc_disk_sb(mdk_rdev_t * rdev)
 {
 	if (rdev->sb_page)
@@ -325,7 +310,7 @@ static int alloc_disk_sb(mdk_rdev_t * rdev)
 	rdev->sb_page = alloc_page(GFP_KERNEL);
 	if (!rdev->sb_page) {
-		printk(OUT_OF_MEM);
+		printk(KERN_ALERT "md: out of memory.\n");
 		return -EINVAL;
 	}
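These conversions are purely mechanical: KERN_ALERT and the other KERN_* markers are string literals, so the old format-string macros and the new open-coded strings produce identical printk() arguments. A one-line illustration of that equivalence (sketch only, wrapper name invented):

/* The two calls below are identical after macro expansion, because
 * adjacent string literals are concatenated by the compiler. */
#define OUT_OF_MEM KERN_ALERT "md: out of memory.\n"

static void report_oom_sketch(void)
{
	printk(OUT_OF_MEM);
	printk(KERN_ALERT "md: out of memory.\n");
}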
...@@ -397,7 +382,8 @@ static int read_disk_sb(mdk_rdev_t * rdev) ...@@ -397,7 +382,8 @@ static int read_disk_sb(mdk_rdev_t * rdev)
return 0; return 0;
fail: fail:
printk(NO_SB,bdev_partition_name(rdev->bdev)); printk(KERN_ERR "md: disabled device %s, could not read superblock.\n",
bdev_partition_name(rdev->bdev));
return -EINVAL; return -EINVAL;
} }
...@@ -526,27 +512,30 @@ static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version ...@@ -526,27 +512,30 @@ static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version
sb = (mdp_super_t*)page_address(rdev->sb_page); sb = (mdp_super_t*)page_address(rdev->sb_page);
if (sb->md_magic != MD_SB_MAGIC) { if (sb->md_magic != MD_SB_MAGIC) {
printk(BAD_MAGIC, bdev_partition_name(rdev->bdev)); printk(KERN_ERR "md: invalid raid superblock magic on %s\n",
bdev_partition_name(rdev->bdev));
goto abort; goto abort;
} }
if (sb->major_version != 0 || if (sb->major_version != 0 ||
sb->minor_version != 90) { sb->minor_version != 90) {
printk(KERN_WARNING "Bad version number %d.%d on %s\n", printk(KERN_WARNING "Bad version number %d.%d on %s\n",
sb->major_version, sb->minor_version, sb->major_version, sb->minor_version,
bdev_partition_name(rdev->bdev)); bdev_partition_name(rdev->bdev));
goto abort; goto abort;
} }
if (sb->md_minor >= MAX_MD_DEVS) { if (sb->md_minor >= MAX_MD_DEVS) {
printk(BAD_MINOR, bdev_partition_name(rdev->bdev), sb->md_minor); printk(KERN_ERR "md: %s: invalid raid minor (%x)\n",
bdev_partition_name(rdev->bdev), sb->md_minor);
goto abort; goto abort;
} }
if (sb->raid_disks <= 0) if (sb->raid_disks <= 0)
goto abort; goto abort;
if (calc_sb_csum(sb) != sb->sb_csum) { if (calc_sb_csum(sb) != sb->sb_csum) {
printk(BAD_CSUM, bdev_partition_name(rdev->bdev)); printk(KERN_WARNING "md: invalid superblock checksum on %s\n",
bdev_partition_name(rdev->bdev));
goto abort; goto abort;
} }
...@@ -565,14 +554,15 @@ static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version ...@@ -565,14 +554,15 @@ static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version
mdp_super_t *refsb = (mdp_super_t*)page_address(refdev->sb_page); mdp_super_t *refsb = (mdp_super_t*)page_address(refdev->sb_page);
if (!uuid_equal(refsb, sb)) { if (!uuid_equal(refsb, sb)) {
printk(KERN_WARNING "md: %s has different UUID to %s\n", printk(KERN_WARNING "md: %s has different UUID to %s\n",
bdev_partition_name(rdev->bdev), bdev_partition_name(rdev->bdev),
bdev_partition_name(refdev->bdev)); bdev_partition_name(refdev->bdev));
goto abort; goto abort;
} }
if (!sb_equal(refsb, sb)) { if (!sb_equal(refsb, sb)) {
printk(KERN_WARNING "md: %s has same UUID but different superblock to %s\n", printk(KERN_WARNING "md: %s has same UUID"
bdev_partition_name(rdev->bdev), " but different superblock to %s\n",
bdev_partition_name(refdev->bdev)); bdev_partition_name(rdev->bdev),
bdev_partition_name(refdev->bdev));
goto abort; goto abort;
} }
ev1 = md_event(sb); ev1 = md_event(sb);
...@@ -826,7 +816,8 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version) ...@@ -826,7 +816,8 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
return -EINVAL; return -EINVAL;
if (calc_sb_1_csum(sb) != sb->sb_csum) { if (calc_sb_1_csum(sb) != sb->sb_csum) {
printk(BAD_CSUM, bdev_partition_name(rdev->bdev)); printk("md: invalid superblock checksum on %s\n",
bdev_partition_name(rdev->bdev));
return -EINVAL; return -EINVAL;
} }
rdev->preferred_minor = 0xffff; rdev->preferred_minor = 0xffff;
...@@ -843,9 +834,10 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version) ...@@ -843,9 +834,10 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
sb->level != refsb->level || sb->level != refsb->level ||
sb->layout != refsb->layout || sb->layout != refsb->layout ||
sb->chunksize != refsb->chunksize) { sb->chunksize != refsb->chunksize) {
printk(KERN_WARNING "md: %s has strangely different superblock to %s\n", printk(KERN_WARNING "md: %s has strangely different"
bdev_partition_name(rdev->bdev), " superblock to %s\n",
bdev_partition_name(refdev->bdev)); bdev_partition_name(rdev->bdev),
bdev_partition_name(refdev->bdev));
return -EINVAL; return -EINVAL;
} }
ev1 = le64_to_cpu(sb->events); ev1 = le64_to_cpu(sb->events);
...@@ -1020,11 +1012,12 @@ static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev) ...@@ -1020,11 +1012,12 @@ static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
} }
same_pdev = match_dev_unit(mddev, rdev); same_pdev = match_dev_unit(mddev, rdev);
if (same_pdev) if (same_pdev)
printk( KERN_WARNING printk(KERN_WARNING
"md%d: WARNING: %s appears to be on the same physical disk as %s. True\n" "md%d: WARNING: %s appears to be on the same physical"
" protection against single-disk failure might be compromised.\n", " disk as %s. True\n protection against single-disk"
" failure might be compromised.\n",
mdidx(mddev), bdev_partition_name(rdev->bdev), mdidx(mddev), bdev_partition_name(rdev->bdev),
bdev_partition_name(same_pdev->bdev)); bdev_partition_name(same_pdev->bdev));
/* Verify rdev->desc_nr is unique. /* Verify rdev->desc_nr is unique.
* If it is -1, assign a free number, else * If it is -1, assign a free number, else
...@@ -1099,7 +1092,8 @@ void md_autodetect_dev(dev_t dev); ...@@ -1099,7 +1092,8 @@ void md_autodetect_dev(dev_t dev);
static void export_rdev(mdk_rdev_t * rdev) static void export_rdev(mdk_rdev_t * rdev)
{ {
printk(KERN_INFO "md: export_rdev(%s)\n",bdev_partition_name(rdev->bdev)); printk(KERN_INFO "md: export_rdev(%s)\n",
bdev_partition_name(rdev->bdev));
if (rdev->mddev) if (rdev->mddev)
MD_BUG(); MD_BUG();
free_disk_sb(rdev); free_disk_sb(rdev);
...@@ -1135,11 +1129,6 @@ static void export_array(mddev_t *mddev) ...@@ -1135,11 +1129,6 @@ static void export_array(mddev_t *mddev)
mddev->major_version = 0; mddev->major_version = 0;
} }
#undef BAD_CSUM
#undef BAD_MAGIC
#undef OUT_OF_MEM
#undef NO_SB
static void print_desc(mdp_disk_t *desc) static void print_desc(mdp_disk_t *desc)
{ {
printk(" DISK<N:%d,%s(%d,%d),R:%d,S:%d>\n", desc->number, printk(" DISK<N:%d,%s(%d,%d),R:%d,S:%d>\n", desc->number,
...@@ -1151,14 +1140,16 @@ static void print_sb(mdp_super_t *sb) ...@@ -1151,14 +1140,16 @@ static void print_sb(mdp_super_t *sb)
{ {
int i; int i;
printk(KERN_INFO "md: SB: (V:%d.%d.%d) ID:<%08x.%08x.%08x.%08x> CT:%08x\n", printk(KERN_INFO
"md: SB: (V:%d.%d.%d) ID:<%08x.%08x.%08x.%08x> CT:%08x\n",
sb->major_version, sb->minor_version, sb->patch_version, sb->major_version, sb->minor_version, sb->patch_version,
sb->set_uuid0, sb->set_uuid1, sb->set_uuid2, sb->set_uuid3, sb->set_uuid0, sb->set_uuid1, sb->set_uuid2, sb->set_uuid3,
sb->ctime); sb->ctime);
printk(KERN_INFO "md: L%d S%08d ND:%d RD:%d md%d LO:%d CS:%d\n", sb->level, printk(KERN_INFO "md: L%d S%08d ND:%d RD:%d md%d LO:%d CS:%d\n",
sb->size, sb->nr_disks, sb->raid_disks, sb->md_minor, sb->level, sb->size, sb->nr_disks, sb->raid_disks,
sb->layout, sb->chunk_size); sb->md_minor, sb->layout, sb->chunk_size);
printk(KERN_INFO "md: UT:%08x ST:%d AD:%d WD:%d FD:%d SD:%d CSUM:%08x E:%08lx\n", printk(KERN_INFO "md: UT:%08x ST:%d AD:%d WD:%d"
" FD:%d SD:%d CSUM:%08x E:%08lx\n",
sb->utime, sb->state, sb->active_disks, sb->working_disks, sb->utime, sb->state, sb->active_disks, sb->working_disks,
sb->failed_disks, sb->spare_disks, sb->failed_disks, sb->spare_disks,
sb->sb_csum, (unsigned long)sb->events_lo); sb->sb_csum, (unsigned long)sb->events_lo);
...@@ -1182,8 +1173,8 @@ static void print_sb(mdp_super_t *sb) ...@@ -1182,8 +1173,8 @@ static void print_sb(mdp_super_t *sb)
static void print_rdev(mdk_rdev_t *rdev) static void print_rdev(mdk_rdev_t *rdev)
{ {
printk(KERN_INFO "md: rdev %s, SZ:%08llu F:%d S:%d DN:%d ", printk(KERN_INFO "md: rdev %s, SZ:%08llu F:%d S:%d DN:%d ",
bdev_partition_name(rdev->bdev), bdev_partition_name(rdev->bdev), (unsigned long long)rdev->size,
(unsigned long long)rdev->size, rdev->faulty, rdev->in_sync, rdev->desc_nr); rdev->faulty, rdev->in_sync, rdev->desc_nr);
if (rdev->sb_loaded) { if (rdev->sb_loaded) {
printk(KERN_INFO "md: rdev superblock:\n"); printk(KERN_INFO "md: rdev superblock:\n");
print_sb((mdp_super_t*)page_address(rdev->sb_page)); print_sb((mdp_super_t*)page_address(rdev->sb_page));
...@@ -1227,13 +1218,15 @@ static int write_disk_sb(mdk_rdev_t * rdev) ...@@ -1227,13 +1218,15 @@ static int write_disk_sb(mdk_rdev_t * rdev)
return 1; return 1;
} }
dprintk(KERN_INFO "(write) %s's sb offset: %llu\n", bdev_partition_name(rdev->bdev), dprintk(KERN_INFO "(write) %s's sb offset: %llu\n",
bdev_partition_name(rdev->bdev),
(unsigned long long)rdev->sb_offset); (unsigned long long)rdev->sb_offset);
if (sync_page_io(rdev->bdev, rdev->sb_offset<<1, MD_SB_BYTES, rdev->sb_page, WRITE)) if (sync_page_io(rdev->bdev, rdev->sb_offset<<1, MD_SB_BYTES, rdev->sb_page, WRITE))
return 0; return 0;
printk("md: write_disk_sb failed for device %s\n", bdev_partition_name(rdev->bdev)); printk("md: write_disk_sb failed for device %s\n",
bdev_partition_name(rdev->bdev));
return 1; return 1;
} }
...@@ -1278,8 +1271,9 @@ static void md_update_sb(mddev_t * mddev) ...@@ -1278,8 +1271,9 @@ static void md_update_sb(mddev_t * mddev)
if (!mddev->persistent) if (!mddev->persistent)
return; return;
dprintk(KERN_INFO "md: updating md%d RAID superblock on device (in sync %d)\n", dprintk(KERN_INFO
mdidx(mddev),mddev->in_sync); "md: updating md%d RAID superblock on device (in sync %d)\n",
mdidx(mddev),mddev->in_sync);
err = 0; err = 0;
ITERATE_RDEV(mddev,rdev,tmp) { ITERATE_RDEV(mddev,rdev,tmp) {
...@@ -1298,10 +1292,12 @@ static void md_update_sb(mddev_t * mddev) ...@@ -1298,10 +1292,12 @@ static void md_update_sb(mddev_t * mddev)
} }
if (err) { if (err) {
if (--count) { if (--count) {
printk(KERN_ERR "md: errors occurred during superblock update, repeating\n"); printk(KERN_ERR "md: errors occurred during superblock"
" update, repeating\n");
goto repeat; goto repeat;
} }
printk(KERN_ERR "md: excessive errors occurred during superblock update, exiting\n"); printk(KERN_ERR \
"md: excessive errors occurred during superblock update, exiting\n");
} }
} }
...@@ -1323,7 +1319,8 @@ static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_mi ...@@ -1323,7 +1319,8 @@ static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_mi
rdev = (mdk_rdev_t *) kmalloc(sizeof(*rdev), GFP_KERNEL); rdev = (mdk_rdev_t *) kmalloc(sizeof(*rdev), GFP_KERNEL);
if (!rdev) { if (!rdev) {
printk(KERN_ERR "md: could not alloc mem for %s!\n", partition_name(newdev)); printk(KERN_ERR "md: could not alloc mem for %s!\n",
partition_name(newdev));
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
} }
memset(rdev, 0, sizeof(*rdev)); memset(rdev, 0, sizeof(*rdev));
...@@ -1345,9 +1342,9 @@ static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_mi ...@@ -1345,9 +1342,9 @@ static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_mi
size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS; size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
if (!size) { if (!size) {
printk(KERN_WARNING printk(KERN_WARNING
"md: %s has zero or unknown size, marking faulty!\n", "md: %s has zero or unknown size, marking faulty!\n",
bdev_partition_name(rdev->bdev)); bdev_partition_name(rdev->bdev));
err = -EINVAL; err = -EINVAL;
goto abort_free; goto abort_free;
} }
...@@ -1356,13 +1353,15 @@ static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_mi ...@@ -1356,13 +1353,15 @@ static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_mi
err = super_types[super_format]. err = super_types[super_format].
load_super(rdev, NULL, super_minor); load_super(rdev, NULL, super_minor);
if (err == -EINVAL) { if (err == -EINVAL) {
printk(KERN_WARNING "md: %s has invalid sb, not importing!\n", printk(KERN_WARNING
bdev_partition_name(rdev->bdev)); "md: %s has invalid sb, not importing!\n",
bdev_partition_name(rdev->bdev));
goto abort_free; goto abort_free;
} }
if (err < 0) { if (err < 0) {
printk(KERN_WARNING "md: could not read %s's sb, not importing!\n", printk(KERN_WARNING
bdev_partition_name(rdev->bdev)); "md: could not read %s's sb, not importing!\n",
bdev_partition_name(rdev->bdev));
goto abort_free; goto abort_free;
} }
} }
...@@ -1384,20 +1383,6 @@ static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_mi ...@@ -1384,20 +1383,6 @@ static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_mi
* Check a full RAID array for plausibility * Check a full RAID array for plausibility
*/ */
#define INCONSISTENT KERN_ERR \
"md: fatal superblock inconsistency in %s -- removing from array\n"
#define OUT_OF_DATE KERN_ERR \
"md: superblock update time inconsistency -- using the most recent one\n"
#define OLD_VERSION KERN_ALERT \
"md: md%d: unsupported raid array version %d.%d.%d\n"
#define NOT_CLEAN_IGNORE KERN_ERR \
"md: md%d: raid array is not clean -- starting background reconstruction\n"
#define UNKNOWN_LEVEL KERN_ERR \
"md: md%d: unsupported raid level %d\n"
static int analyze_sbs(mddev_t * mddev) static int analyze_sbs(mddev_t * mddev)
{ {
...@@ -1415,7 +1400,10 @@ static int analyze_sbs(mddev_t * mddev) ...@@ -1415,7 +1400,10 @@ static int analyze_sbs(mddev_t * mddev)
case 0: case 0:
break; break;
default: default:
printk(INCONSISTENT, bdev_partition_name(rdev->bdev)); printk( KERN_ERR \
"md: fatal superblock inconsistency in %s"
" -- removing from array\n",
bdev_partition_name(rdev->bdev));
kick_rdev_from_array(rdev); kick_rdev_from_array(rdev);
} }
...@@ -1428,8 +1416,9 @@ static int analyze_sbs(mddev_t * mddev) ...@@ -1428,8 +1416,9 @@ static int analyze_sbs(mddev_t * mddev)
if (rdev != freshest) if (rdev != freshest)
if (super_types[mddev->major_version]. if (super_types[mddev->major_version].
validate_super(mddev, rdev)) { validate_super(mddev, rdev)) {
printk(KERN_WARNING "md: kicking non-fresh %s from array!\n", printk(KERN_WARNING "md: kicking non-fresh %s"
bdev_partition_name(rdev->bdev)); " from array!\n",
bdev_partition_name(rdev->bdev));
kick_rdev_from_array(rdev); kick_rdev_from_array(rdev);
continue; continue;
} }
...@@ -1446,26 +1435,24 @@ static int analyze_sbs(mddev_t * mddev) ...@@ -1446,26 +1435,24 @@ static int analyze_sbs(mddev_t * mddev)
*/ */
if (mddev->major_version != MD_MAJOR_VERSION || if (mddev->major_version != MD_MAJOR_VERSION ||
mddev->minor_version > MD_MINOR_VERSION) { mddev->minor_version > MD_MINOR_VERSION) {
printk(KERN_ALERT
printk(OLD_VERSION, mdidx(mddev), mddev->major_version, "md: md%d: unsupported raid array version %d.%d.%d\n",
mddev->minor_version, mddev->patch_version); mdidx(mddev), mddev->major_version,
mddev->minor_version, mddev->patch_version);
goto abort; goto abort;
} }
if ((mddev->recovery_cp != MaxSector) && ((mddev->level == 1) || if ((mddev->recovery_cp != MaxSector) && ((mddev->level == 1) ||
(mddev->level == 4) || (mddev->level == 5))) (mddev->level == 4) || (mddev->level == 5)))
printk(NOT_CLEAN_IGNORE, mdidx(mddev)); printk(KERN_ERR "md: md%d: raid array is not clean"
" -- starting background reconstruction\n",
mdidx(mddev));
return 0; return 0;
abort: abort:
return 1; return 1;
} }
#undef INCONSISTENT
#undef OUT_OF_DATE
#undef OLD_VERSION
#undef OLD_LEVEL
static int device_size_calculation(mddev_t * mddev) static int device_size_calculation(mddev_t * mddev)
{ {
int data_disks = 0; int data_disks = 0;
...@@ -1484,9 +1471,11 @@ static int device_size_calculation(mddev_t * mddev) ...@@ -1484,9 +1471,11 @@ static int device_size_calculation(mddev_t * mddev)
continue; continue;
if (rdev->size < mddev->chunk_size / 1024) { if (rdev->size < mddev->chunk_size / 1024) {
printk(KERN_WARNING printk(KERN_WARNING
"md: Dev %s smaller than chunk_size: %lluk < %dk\n", "md: Dev %s smaller than chunk_size:"
" %lluk < %dk\n",
bdev_partition_name(rdev->bdev), bdev_partition_name(rdev->bdev),
(unsigned long long)rdev->size, mddev->chunk_size / 1024); (unsigned long long)rdev->size,
mddev->chunk_size / 1024);
return -EINVAL; return -EINVAL;
} }
} }
...@@ -1517,7 +1506,8 @@ static int device_size_calculation(mddev_t * mddev) ...@@ -1517,7 +1506,8 @@ static int device_size_calculation(mddev_t * mddev)
data_disks = mddev->raid_disks-1; data_disks = mddev->raid_disks-1;
break; break;
default: default:
printk(UNKNOWN_LEVEL, mdidx(mddev), mddev->level); printk(KERN_ERR "md: md%d: unsupported raid level %d\n",
mdidx(mddev), mddev->level);
goto abort; goto abort;
} }
if (!md_size[mdidx(mddev)]) if (!md_size[mdidx(mddev)])
...@@ -1539,7 +1529,7 @@ static int device_size_calculation(mddev_t * mddev) ...@@ -1539,7 +1529,7 @@ static int device_size_calculation(mddev_t * mddev)
printk(KERN_INFO printk(KERN_INFO
"md%d: %d data-disks, max readahead per data-disk: %ldk\n", "md%d: %d data-disks, max readahead per data-disk: %ldk\n",
mdidx(mddev), data_disks, readahead/data_disks*(PAGE_SIZE/1024)); mdidx(mddev), data_disks, readahead/data_disks*(PAGE_SIZE/1024));
return 0; return 0;
abort: abort:
return 1; return 1;
...@@ -1589,14 +1579,6 @@ static void md_safemode_timeout(unsigned long data) ...@@ -1589,14 +1579,6 @@ static void md_safemode_timeout(unsigned long data)
md_wakeup_thread(mddev->thread); md_wakeup_thread(mddev->thread);
} }
#define TOO_BIG_CHUNKSIZE KERN_ERR \
"too big chunk_size: %d > %d\n"
#define TOO_SMALL_CHUNKSIZE KERN_ERR \
"too small chunk_size: %d < %ld\n"
#define BAD_CHUNKSIZE KERN_ERR \
"no chunksize specified, see 'man raidtab'\n"
static int do_md_run(mddev_t * mddev) static int do_md_run(mddev_t * mddev)
{ {
@@ -1639,11 +1621,13 @@ static int do_md_run(mddev_t * mddev)
 		 * we abort here to be on the safe side. We don't
 		 * want to continue the bad practice.
 		 */
-			printk(BAD_CHUNKSIZE);
+			printk(KERN_ERR
+				"no chunksize specified, see 'man raidtab'\n");
 			return -EINVAL;
 		}
 		if (chunk_size > MAX_CHUNK_SIZE) {
-			printk(TOO_BIG_CHUNKSIZE, chunk_size, MAX_CHUNK_SIZE);
+			printk(KERN_ERR "too big chunk_size: %d > %d\n",
+				chunk_size, MAX_CHUNK_SIZE);
 			return -EINVAL;
 		}
 		/*
@@ -1654,7 +1638,8 @@ static int do_md_run(mddev_t * mddev)
 			return -EINVAL;
 		}
 		if (chunk_size < PAGE_SIZE) {
-			printk(TOO_SMALL_CHUNKSIZE, chunk_size, PAGE_SIZE);
+			printk(KERN_ERR "too small chunk_size: %d < %ld\n",
+				chunk_size, PAGE_SIZE);
 			return -EINVAL;
 		}
 	}
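Taken together, and assuming the usual md constraint (enforced just above this hunk, not visible here) that the chunk size is a power of two, these checks pin the usable chunk size between PAGE_SIZE and MAX_CHUNK_SIZE. A compact restatement as a single predicate; sketch only, the function name is invented and MAX_CHUNK_SIZE is defined elsewhere in md:

/* Sketch of the constraint do_md_run() enforces piecewise above. */
static int chunk_size_valid(int chunk_size)
{
	if (chunk_size <= 0)
		return 0;
	if (chunk_size & (chunk_size - 1))	/* must be a power of two */
		return 0;
	if (chunk_size < PAGE_SIZE || chunk_size > MAX_CHUNK_SIZE)
		return 0;
	return 1;
}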
...@@ -1664,20 +1649,14 @@ static int do_md_run(mddev_t * mddev) ...@@ -1664,20 +1649,14 @@ static int do_md_run(mddev_t * mddev)
return -EINVAL; return -EINVAL;
} }
#ifdef CONFIG_KMOD
if (!pers[pnum]) if (!pers[pnum])
{ {
#ifdef CONFIG_KMOD
char module_name[80]; char module_name[80];
sprintf (module_name, "md-personality-%d", pnum); sprintf (module_name, "md-personality-%d", pnum);
request_module (module_name); request_module (module_name);
if (!pers[pnum])
#endif
{
printk(KERN_ERR "md: personality %d is not loaded!\n",
pnum);
return -EINVAL;
}
} }
#endif
if (device_size_calculation(mddev)) if (device_size_calculation(mddev))
return -EINVAL; return -EINVAL;
@@ -1711,13 +1690,23 @@ static int do_md_run(mddev_t * mddev)
 	disk = disks[mdidx(mddev)];
 	if (!disk)
 		return -ENOMEM;
+	spin_lock(&pers_lock);
+	if (!pers[pnum] || !try_module_get(pers[pnum]->owner)) {
+		spin_unlock(&pers_lock);
+		printk(KERN_ERR "md: personality %d is not loaded!\n",
+		       pnum);
+		return -EINVAL;
+	}
+
 	mddev->pers = pers[pnum];
+	spin_unlock(&pers_lock);
+
 	blk_queue_make_request(&mddev->queue, mddev->pers->make_request);
 	printk("%s: setting max_sectors to %d, segment boundary to %d\n",
 		disk->disk_name,
 		chunk_size >> 9,
 		(chunk_size>>1)-1);
 	blk_queue_max_sectors(&mddev->queue, chunk_size >> 9);
 	blk_queue_segment_boundary(&mddev->queue, (chunk_size>>1) - 1);
 	mddev->queue.queuedata = mddev;
@@ -1726,6 +1715,7 @@ static int do_md_run(mddev_t * mddev)
 	if (err) {
 		printk(KERN_ERR "md: pers->run() failed ...\n");
 		mddev->pers = NULL;
+		module_put(mddev->pers->owner);
 		return -EINVAL;
 	}
 	atomic_set(&mddev->writes_pending,0);
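A condensed sketch of the reference-counting rule these two hunks add. Names are taken from the patch, but the wrapper function is invented, and the error-path ordering below (module_put() before clearing mddev->pers) is the illustrative safe order rather than a quotation of the patch:

/* Sketch: hold a reference on the personality's module for as long as
 * the array is using it; drop it if run() fails or when the array is
 * stopped (see the do_md_stop() hunk further down). */
static int bind_personality_sketch(mddev_t *mddev, int pnum)
{
	spin_lock(&pers_lock);
	if (!pers[pnum] || !try_module_get(pers[pnum]->owner)) {
		spin_unlock(&pers_lock);
		return -EINVAL;			/* not loaded */
	}
	mddev->pers = pers[pnum];
	spin_unlock(&pers_lock);

	if (mddev->pers->run(mddev)) {
		module_put(mddev->pers->owner);	/* drop the ref first ... */
		mddev->pers = NULL;		/* ... then forget it */
		return -EINVAL;
	}
	return 0;
}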
...@@ -1741,9 +1731,6 @@ static int do_md_run(mddev_t * mddev) ...@@ -1741,9 +1731,6 @@ static int do_md_run(mddev_t * mddev)
return (0); return (0);
} }
#undef TOO_BIG_CHUNKSIZE
#undef BAD_CHUNKSIZE
static int restart_array(mddev_t *mddev) static int restart_array(mddev_t *mddev)
{ {
struct gendisk *disk = disks[mdidx(mddev)]; struct gendisk *disk = disks[mdidx(mddev)];
...@@ -1765,8 +1752,8 @@ static int restart_array(mddev_t *mddev) ...@@ -1765,8 +1752,8 @@ static int restart_array(mddev_t *mddev)
mddev->ro = 0; mddev->ro = 0;
set_disk_ro(disk, 0); set_disk_ro(disk, 0);
printk(KERN_INFO printk(KERN_INFO "md: md%d switched to read-write mode.\n",
"md: md%d switched to read-write mode.\n", mdidx(mddev)); mdidx(mddev));
/* /*
* Kick recovery or resync if necessary * Kick recovery or resync if necessary
*/ */
...@@ -1783,18 +1770,13 @@ static int restart_array(mddev_t *mddev) ...@@ -1783,18 +1770,13 @@ static int restart_array(mddev_t *mddev)
return err; return err;
} }
#define STILL_MOUNTED KERN_WARNING \
"md: md%d still mounted.\n"
#define STILL_IN_USE \
"md: md%d still in use.\n"
static int do_md_stop(mddev_t * mddev, int ro) static int do_md_stop(mddev_t * mddev, int ro)
{ {
int err = 0; int err = 0;
struct gendisk *disk = disks[mdidx(mddev)]; struct gendisk *disk = disks[mdidx(mddev)];
if (atomic_read(&mddev->active)>2) { if (atomic_read(&mddev->active)>2) {
printk(STILL_IN_USE, mdidx(mddev)); printk("md: md%d still in use.\n",mdidx(mddev));
err = -EBUSY; err = -EBUSY;
goto out; goto out;
} }
...@@ -1824,6 +1806,7 @@ static int do_md_stop(mddev_t * mddev, int ro) ...@@ -1824,6 +1806,7 @@ static int do_md_stop(mddev_t * mddev, int ro)
set_disk_ro(disk, 1); set_disk_ro(disk, 1);
goto out; goto out;
} }
module_put(mddev->pers->owner);
mddev->pers = NULL; mddev->pers = NULL;
if (mddev->ro) if (mddev->ro)
mddev->ro = 0; mddev->ro = 0;
...@@ -1850,7 +1833,8 @@ static int do_md_stop(mddev_t * mddev, int ro) ...@@ -1850,7 +1833,8 @@ static int do_md_stop(mddev_t * mddev, int ro)
if (disk) if (disk)
set_capacity(disk, 0); set_capacity(disk, 0);
} else } else
printk(KERN_INFO "md: md%d switched to read-only mode.\n", mdidx(mddev)); printk(KERN_INFO "md: md%d switched to read-only mode.\n",
mdidx(mddev));
err = 0; err = 0;
out: out:
return err; return err;
...@@ -1905,11 +1889,13 @@ static void autorun_devices(void) ...@@ -1905,11 +1889,13 @@ static void autorun_devices(void)
rdev0 = list_entry(pending_raid_disks.next, rdev0 = list_entry(pending_raid_disks.next,
mdk_rdev_t, same_set); mdk_rdev_t, same_set);
printk(KERN_INFO "md: considering %s ...\n", bdev_partition_name(rdev0->bdev)); printk(KERN_INFO "md: considering %s ...\n",
bdev_partition_name(rdev0->bdev));
INIT_LIST_HEAD(&candidates); INIT_LIST_HEAD(&candidates);
ITERATE_RDEV_PENDING(rdev,tmp) ITERATE_RDEV_PENDING(rdev,tmp)
if (super_90_load(rdev, rdev0, 0) >= 0) { if (super_90_load(rdev, rdev0, 0) >= 0) {
printk(KERN_INFO "md: adding %s ...\n", bdev_partition_name(rdev->bdev)); printk(KERN_INFO "md: adding %s ...\n",
bdev_partition_name(rdev->bdev));
list_move(&rdev->same_set, &candidates); list_move(&rdev->same_set, &candidates);
} }
/* /*
...@@ -1920,7 +1906,8 @@ static void autorun_devices(void) ...@@ -1920,7 +1906,8 @@ static void autorun_devices(void)
mddev = mddev_find(rdev0->preferred_minor); mddev = mddev_find(rdev0->preferred_minor);
if (!mddev) { if (!mddev) {
printk(KERN_ERR "md: cannot allocate memory for md drive.\n"); printk(KERN_ERR
"md: cannot allocate memory for md drive.\n");
break; break;
} }
if (mddev_lock(mddev)) if (mddev_lock(mddev))
...@@ -1928,8 +1915,9 @@ static void autorun_devices(void) ...@@ -1928,8 +1915,9 @@ static void autorun_devices(void)
mdidx(mddev)); mdidx(mddev));
else if (mddev->raid_disks || mddev->major_version else if (mddev->raid_disks || mddev->major_version
|| !list_empty(&mddev->disks)) { || !list_empty(&mddev->disks)) {
printk(KERN_WARNING "md: md%d already running, cannot run %s\n", printk(KERN_WARNING
mdidx(mddev), bdev_partition_name(rdev0->bdev)); "md: md%d already running, cannot run %s\n",
mdidx(mddev), bdev_partition_name(rdev0->bdev));
mddev_unlock(mddev); mddev_unlock(mddev);
} else { } else {
printk(KERN_INFO "md: created md%d\n", mdidx(mddev)); printk(KERN_INFO "md: created md%d\n", mdidx(mddev));
...@@ -1956,33 +1944,6 @@ static void autorun_devices(void) ...@@ -1956,33 +1944,6 @@ static void autorun_devices(void)
* if possible, the array gets run as well. * if possible, the array gets run as well.
*/ */
#define BAD_VERSION KERN_ERR \
"md: %s has RAID superblock version 0.%d, autodetect needs v0.90 or higher\n"
#define OUT_OF_MEM KERN_ALERT \
"md: out of memory.\n"
#define NO_DEVICE KERN_ERR \
"md: disabled device %s\n"
#define AUTOADD_FAILED KERN_ERR \
"md: auto-adding devices to md%d FAILED (error %d).\n"
#define AUTOADD_FAILED_USED KERN_ERR \
"md: cannot auto-add device %s to md%d, already used.\n"
#define AUTORUN_FAILED KERN_ERR \
"md: auto-running md%d FAILED (error %d).\n"
#define MDDEV_BUSY KERN_ERR \
"md: cannot auto-add to md%d, already running.\n"
#define AUTOADDING KERN_INFO \
"md: auto-adding devices to md%d, based on %s's superblock.\n"
#define AUTORUNNING KERN_INFO \
"md: auto-running md%d.\n"
static int autostart_array(dev_t startdev) static int autostart_array(dev_t startdev)
{ {
int err = -EINVAL, i; int err = -EINVAL, i;
...@@ -1991,7 +1952,8 @@ static int autostart_array(dev_t startdev) ...@@ -1991,7 +1952,8 @@ static int autostart_array(dev_t startdev)
start_rdev = md_import_device(startdev, 0, 0); start_rdev = md_import_device(startdev, 0, 0);
if (IS_ERR(start_rdev)) { if (IS_ERR(start_rdev)) {
printk(KERN_WARNING "md: could not import %s!\n", partition_name(startdev)); printk(KERN_WARNING "md: could not import %s!\n",
partition_name(startdev));
return err; return err;
} }
...@@ -2005,8 +1967,9 @@ static int autostart_array(dev_t startdev) ...@@ -2005,8 +1967,9 @@ static int autostart_array(dev_t startdev)
} }
if (start_rdev->faulty) { if (start_rdev->faulty) {
printk(KERN_WARNING "md: can not autostart based on faulty %s!\n", printk(KERN_WARNING
bdev_partition_name(start_rdev->bdev)); "md: can not autostart based on faulty %s!\n",
bdev_partition_name(start_rdev->bdev));
export_rdev(start_rdev); export_rdev(start_rdev);
return err; return err;
} }
...@@ -2025,8 +1988,9 @@ static int autostart_array(dev_t startdev) ...@@ -2025,8 +1988,9 @@ static int autostart_array(dev_t startdev)
continue; continue;
rdev = md_import_device(dev, 0, 0); rdev = md_import_device(dev, 0, 0);
if (IS_ERR(rdev)) { if (IS_ERR(rdev)) {
printk(KERN_WARNING "md: could not import %s, trying to run array nevertheless.\n", printk(KERN_WARNING "md: could not import %s,"
partition_name(dev)); " trying to run array nevertheless.\n",
partition_name(dev));
continue; continue;
} }
list_add(&rdev->same_set, &pending_raid_disks); list_add(&rdev->same_set, &pending_raid_disks);
...@@ -2040,15 +2004,6 @@ static int autostart_array(dev_t startdev) ...@@ -2040,15 +2004,6 @@ static int autostart_array(dev_t startdev)
} }
#undef BAD_VERSION
#undef OUT_OF_MEM
#undef NO_DEVICE
#undef AUTOADD_FAILED_USED
#undef AUTOADD_FAILED
#undef AUTORUN_FAILED
#undef AUTOADDING
#undef AUTORUNNING
static int get_version(void * arg) static int get_version(void * arg)
{ {
...@@ -2113,8 +2068,6 @@ static int get_array_info(mddev_t * mddev, void * arg) ...@@ -2113,8 +2068,6 @@ static int get_array_info(mddev_t * mddev, void * arg)
return 0; return 0;
} }
#undef SET_FROM_SB
static int get_disk_info(mddev_t * mddev, void * arg) static int get_disk_info(mddev_t * mddev, void * arg)
{ {
...@@ -2161,7 +2114,9 @@ static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info) ...@@ -2161,7 +2114,9 @@ static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
/* expecting a device which has a superblock */ /* expecting a device which has a superblock */
rdev = md_import_device(dev, mddev->major_version, mddev->minor_version); rdev = md_import_device(dev, mddev->major_version, mddev->minor_version);
if (IS_ERR(rdev)) { if (IS_ERR(rdev)) {
printk(KERN_WARNING "md: md_import_device returned %ld\n", PTR_ERR(rdev)); printk(KERN_WARNING
"md: md_import_device returned %ld\n",
PTR_ERR(rdev));
return PTR_ERR(rdev); return PTR_ERR(rdev);
} }
if (!list_empty(&mddev->disks)) { if (!list_empty(&mddev->disks)) {
...@@ -2170,8 +2125,10 @@ static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info) ...@@ -2170,8 +2125,10 @@ static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
int err = super_types[mddev->major_version] int err = super_types[mddev->major_version]
.load_super(rdev, rdev0, mddev->minor_version); .load_super(rdev, rdev0, mddev->minor_version);
if (err < 0) { if (err < 0) {
printk(KERN_WARNING "md: %s has different UUID to %s\n", printk(KERN_WARNING
bdev_partition_name(rdev->bdev), bdev_partition_name(rdev0->bdev)); "md: %s has different UUID to %s\n",
bdev_partition_name(rdev->bdev),
bdev_partition_name(rdev0->bdev));
export_rdev(rdev); export_rdev(rdev);
return -EINVAL; return -EINVAL;
} }
...@@ -2190,14 +2147,17 @@ static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info) ...@@ -2190,14 +2147,17 @@ static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
if (mddev->pers) { if (mddev->pers) {
int err; int err;
if (!mddev->pers->hot_add_disk) { if (!mddev->pers->hot_add_disk) {
printk(KERN_WARNING "md%d: personality does not support diskops!\n", printk(KERN_WARNING
"md%d: personality does not support diskops!\n",
mdidx(mddev)); mdidx(mddev));
return -EINVAL; return -EINVAL;
} }
rdev = md_import_device(dev, mddev->major_version, rdev = md_import_device(dev, mddev->major_version,
mddev->minor_version); mddev->minor_version);
if (IS_ERR(rdev)) { if (IS_ERR(rdev)) {
printk(KERN_WARNING "md: md_import_device returned %ld\n", PTR_ERR(rdev)); printk(KERN_WARNING
"md: md_import_device returned %ld\n",
PTR_ERR(rdev));
return PTR_ERR(rdev); return PTR_ERR(rdev);
} }
rdev->in_sync = 0; /* just to be sure */ rdev->in_sync = 0; /* just to be sure */
...@@ -2223,7 +2183,9 @@ static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info) ...@@ -2223,7 +2183,9 @@ static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
int err; int err;
rdev = md_import_device (dev, -1, 0); rdev = md_import_device (dev, -1, 0);
if (IS_ERR(rdev)) { if (IS_ERR(rdev)) {
printk(KERN_WARNING "md: error, md_import_device() returned %ld\n", PTR_ERR(rdev)); printk(KERN_WARNING
"md: error, md_import_device() returned %ld\n",
PTR_ERR(rdev));
return PTR_ERR(rdev); return PTR_ERR(rdev);
} }
rdev->desc_nr = info->number; rdev->desc_nr = info->number;
...@@ -2333,19 +2295,23 @@ static int hot_add_disk(mddev_t * mddev, dev_t dev) ...@@ -2333,19 +2295,23 @@ static int hot_add_disk(mddev_t * mddev, dev_t dev)
partition_name(dev), mdidx(mddev)); partition_name(dev), mdidx(mddev));
if (mddev->major_version != 0) { if (mddev->major_version != 0) {
printk(KERN_WARNING "md%d: HOT_ADD may only be used with version-0 superblocks.\n", printk(KERN_WARNING "md%d: HOT_ADD may only be used with"
mdidx(mddev)); " version-0 superblocks.\n",
mdidx(mddev));
return -EINVAL; return -EINVAL;
} }
if (!mddev->pers->hot_add_disk) { if (!mddev->pers->hot_add_disk) {
printk(KERN_WARNING "md%d: personality does not support diskops!\n", printk(KERN_WARNING
mdidx(mddev)); "md%d: personality does not support diskops!\n",
mdidx(mddev));
return -EINVAL; return -EINVAL;
} }
rdev = md_import_device (dev, -1, 0); rdev = md_import_device (dev, -1, 0);
if (IS_ERR(rdev)) { if (IS_ERR(rdev)) {
printk(KERN_WARNING "md: error, md_import_device() returned %ld\n", PTR_ERR(rdev)); printk(KERN_WARNING
"md: error, md_import_device() returned %ld\n",
PTR_ERR(rdev));
return -EINVAL; return -EINVAL;
} }
...@@ -2354,16 +2320,18 @@ static int hot_add_disk(mddev_t * mddev, dev_t dev) ...@@ -2354,16 +2320,18 @@ static int hot_add_disk(mddev_t * mddev, dev_t dev)
rdev->size = size; rdev->size = size;
if (size < mddev->size) { if (size < mddev->size) {
printk(KERN_WARNING "md%d: disk size %llu blocks < array size %llu\n", printk(KERN_WARNING
mdidx(mddev), (unsigned long long)size, "md%d: disk size %llu blocks < array size %llu\n",
(unsigned long long)mddev->size); mdidx(mddev), (unsigned long long)size,
(unsigned long long)mddev->size);
err = -ENOSPC; err = -ENOSPC;
goto abort_export; goto abort_export;
} }
if (rdev->faulty) { if (rdev->faulty) {
printk(KERN_WARNING "md: can not hot-add faulty %s disk to md%d!\n", printk(KERN_WARNING
bdev_partition_name(rdev->bdev), mdidx(mddev)); "md: can not hot-add faulty %s disk to md%d!\n",
bdev_partition_name(rdev->bdev), mdidx(mddev));
err = -EINVAL; err = -EINVAL;
goto abort_export; goto abort_export;
} }
...@@ -2378,7 +2346,7 @@ static int hot_add_disk(mddev_t * mddev, dev_t dev) ...@@ -2378,7 +2346,7 @@ static int hot_add_disk(mddev_t * mddev, dev_t dev)
if (rdev->desc_nr == mddev->max_disks) { if (rdev->desc_nr == mddev->max_disks) {
printk(KERN_WARNING "md%d: can not hot-add to full array!\n", printk(KERN_WARNING "md%d: can not hot-add to full array!\n",
mdidx(mddev)); mdidx(mddev));
err = -EBUSY; err = -EBUSY;
goto abort_unbind_export; goto abort_unbind_export;
} }
...@@ -2426,8 +2394,9 @@ static int set_array_info(mddev_t * mddev, mdu_array_info_t *info) ...@@ -2426,8 +2394,9 @@ static int set_array_info(mddev_t * mddev, mdu_array_info_t *info)
info->major_version >= sizeof(super_types)/sizeof(super_types[0]) || info->major_version >= sizeof(super_types)/sizeof(super_types[0]) ||
super_types[info->major_version].name == NULL) { super_types[info->major_version].name == NULL) {
/* maybe try to auto-load a module? */ /* maybe try to auto-load a module? */
printk(KERN_INFO "md: superblock version %d not known\n", printk(KERN_INFO
info->major_version); "md: superblock version %d not known\n",
info->major_version);
return -EINVAL; return -EINVAL;
} }
mddev->major_version = info->major_version; mddev->major_version = info->major_version;
...@@ -2540,7 +2509,7 @@ static int md_ioctl(struct inode *inode, struct file *file, ...@@ -2540,7 +2509,7 @@ static int md_ioctl(struct inode *inode, struct file *file,
err = autostart_array(arg); err = autostart_array(arg);
if (err) { if (err) {
printk(KERN_WARNING "md: autostart %s failed!\n", printk(KERN_WARNING "md: autostart %s failed!\n",
partition_name(arg)); partition_name(arg));
goto abort; goto abort;
} }
goto done; goto done;
...@@ -2548,8 +2517,9 @@ static int md_ioctl(struct inode *inode, struct file *file, ...@@ -2548,8 +2517,9 @@ static int md_ioctl(struct inode *inode, struct file *file,
err = mddev_lock(mddev); err = mddev_lock(mddev);
if (err) { if (err) {
printk(KERN_INFO "md: ioctl lock interrupted, reason %d, cmd %d\n", printk(KERN_INFO
err, cmd); "md: ioctl lock interrupted, reason %d, cmd %d\n",
err, cmd);
goto abort; goto abort;
} }
...@@ -2558,13 +2528,15 @@ static int md_ioctl(struct inode *inode, struct file *file, ...@@ -2558,13 +2528,15 @@ static int md_ioctl(struct inode *inode, struct file *file,
case SET_ARRAY_INFO: case SET_ARRAY_INFO:
if (!list_empty(&mddev->disks)) { if (!list_empty(&mddev->disks)) {
printk(KERN_WARNING "md: array md%d already has disks!\n", printk(KERN_WARNING
"md: array md%d already has disks!\n",
mdidx(mddev)); mdidx(mddev));
err = -EBUSY; err = -EBUSY;
goto abort_unlock; goto abort_unlock;
} }
if (mddev->raid_disks) { if (mddev->raid_disks) {
printk(KERN_WARNING "md: array md%d already initialised!\n", printk(KERN_WARNING
"md: array md%d already initialised!\n",
mdidx(mddev)); mdidx(mddev));
err = -EBUSY; err = -EBUSY;
goto abort_unlock; goto abort_unlock;
...@@ -2579,7 +2551,8 @@ static int md_ioctl(struct inode *inode, struct file *file, ...@@ -2579,7 +2551,8 @@ static int md_ioctl(struct inode *inode, struct file *file,
} }
err = set_array_info(mddev, &info); err = set_array_info(mddev, &info);
if (err) { if (err) {
printk(KERN_WARNING "md: couldn't set array info. %d\n", err); printk(KERN_WARNING "md: couldn't set"
" array info. %d\n", err);
goto abort_unlock; goto abort_unlock;
} }
} }
...@@ -2701,9 +2674,10 @@ static int md_ioctl(struct inode *inode, struct file *file, ...@@ -2701,9 +2674,10 @@ static int md_ioctl(struct inode *inode, struct file *file,
default: default:
if (_IOC_TYPE(cmd) == MD_MAJOR) if (_IOC_TYPE(cmd) == MD_MAJOR)
printk(KERN_WARNING "md: %s(pid %d) used obsolete MD ioctl, " printk(KERN_WARNING "md: %s(pid %d) used"
"upgrade your software to use new ictls.\n", " obsolete MD ioctl, upgrade your"
current->comm, current->pid); " software to use new ictls.\n",
current->comm, current->pid);
err = -EINVAL; err = -EINVAL;
goto abort_unlock; goto abort_unlock;
} }
...@@ -2879,7 +2853,8 @@ void md_unregister_thread(mdk_thread_t *thread) ...@@ -2879,7 +2853,8 @@ void md_unregister_thread(mdk_thread_t *thread)
void md_error(mddev_t *mddev, mdk_rdev_t *rdev) void md_error(mddev_t *mddev, mdk_rdev_t *rdev)
{ {
dprintk("md_error dev:(%d:%d), rdev:(%d:%d), (caller: %p,%p,%p,%p).\n", dprintk("md_error dev:(%d:%d), rdev:(%d:%d), (caller: %p,%p,%p,%p).\n",
MD_MAJOR,mdidx(mddev),MAJOR(rdev->bdev->bd_dev),MINOR(rdev->bdev->bd_dev), MD_MAJOR,mdidx(mddev),
MAJOR(rdev->bdev->bd_dev), MINOR(rdev->bdev->bd_dev),
__builtin_return_address(0),__builtin_return_address(1), __builtin_return_address(0),__builtin_return_address(1),
__builtin_return_address(2),__builtin_return_address(3)); __builtin_return_address(2),__builtin_return_address(3));
...@@ -3038,10 +3013,12 @@ static int md_seq_show(struct seq_file *seq, void *v) ...@@ -3038,10 +3013,12 @@ static int md_seq_show(struct seq_file *seq, void *v)
if (v == (void*)1) { if (v == (void*)1) {
seq_printf(seq, "Personalities : "); seq_printf(seq, "Personalities : ");
spin_lock(&pers_lock);
for (i = 0; i < MAX_PERSONALITY; i++) for (i = 0; i < MAX_PERSONALITY; i++)
if (pers[i]) if (pers[i])
seq_printf(seq, "[%s] ", pers[i]->name); seq_printf(seq, "[%s] ", pers[i]->name);
spin_unlock(&pers_lock);
seq_printf(seq, "\n"); seq_printf(seq, "\n");
return 0; return 0;
} }
...@@ -3125,13 +3102,16 @@ int register_md_personality(int pnum, mdk_personality_t *p) ...@@ -3125,13 +3102,16 @@ int register_md_personality(int pnum, mdk_personality_t *p)
return -EINVAL; return -EINVAL;
} }
spin_lock(&pers_lock);
if (pers[pnum]) { if (pers[pnum]) {
spin_unlock(&pers_lock);
MD_BUG(); MD_BUG();
return -EBUSY; return -EBUSY;
} }
pers[pnum] = p; pers[pnum] = p;
printk(KERN_INFO "md: %s personality registered as nr %d\n", p->name, pnum); printk(KERN_INFO "md: %s personality registered as nr %d\n", p->name, pnum);
spin_unlock(&pers_lock);
return 0; return 0;
} }
...@@ -3143,7 +3123,9 @@ int unregister_md_personality(int pnum) ...@@ -3143,7 +3123,9 @@ int unregister_md_personality(int pnum)
} }
printk(KERN_INFO "md: %s personality unregistered\n", pers[pnum]->name); printk(KERN_INFO "md: %s personality unregistered\n", pers[pnum]->name);
spin_lock(&pers_lock);
pers[pnum] = NULL; pers[pnum] = NULL;
spin_unlock(&pers_lock);
return 0; return 0;
} }
...@@ -3228,7 +3210,8 @@ static inline void md_enter_safemode(mddev_t *mddev) ...@@ -3228,7 +3210,8 @@ static inline void md_enter_safemode(mddev_t *mddev)
void md_handle_safemode(mddev_t *mddev) void md_handle_safemode(mddev_t *mddev)
{ {
if (signal_pending(current)) { if (signal_pending(current)) {
printk(KERN_INFO "md: md%d in immediate safe mode\n",mdidx(mddev)); printk(KERN_INFO "md: md%d in immediate safe mode\n",
mdidx(mddev));
mddev->safemode = 2; mddev->safemode = 2;
flush_signals(current); flush_signals(current);
} }
...@@ -3271,8 +3254,9 @@ static void md_do_sync(mddev_t *mddev) ...@@ -3271,8 +3254,9 @@ static void md_do_sync(mddev_t *mddev)
continue; continue;
if (mddev2->curr_resync && if (mddev2->curr_resync &&
match_mddev_units(mddev,mddev2)) { match_mddev_units(mddev,mddev2)) {
printk(KERN_INFO "md: delaying resync of md%d until md%d " printk(KERN_INFO "md: delaying resync of md%d"
"has finished resync (they share one or more physical units)\n", " until md%d has finished resync (they"
" share one or more physical units)\n",
mdidx(mddev), mdidx(mddev2)); mdidx(mddev), mdidx(mddev2));
if (mddev < mddev2) {/* arbitrarily yield */ if (mddev < mddev2) {/* arbitrarily yield */
mddev->curr_resync = 1; mddev->curr_resync = 1;
...@@ -3295,7 +3279,8 @@ static void md_do_sync(mddev_t *mddev) ...@@ -3295,7 +3279,8 @@ static void md_do_sync(mddev_t *mddev)
max_sectors = mddev->size << 1; max_sectors = mddev->size << 1;
printk(KERN_INFO "md: syncing RAID array md%d\n", mdidx(mddev)); printk(KERN_INFO "md: syncing RAID array md%d\n", mdidx(mddev));
printk(KERN_INFO "md: minimum _guaranteed_ reconstruction speed: %d KB/sec/disc.\n", sysctl_speed_limit_min); printk(KERN_INFO "md: minimum _guaranteed_ reconstruction speed:"
" %d KB/sec/disc.\n", sysctl_speed_limit_min);
printk(KERN_INFO "md: using maximum available idle IO bandwith " printk(KERN_INFO "md: using maximum available idle IO bandwith "
"(but not more than %d KB/sec) for reconstruction.\n", "(but not more than %d KB/sec) for reconstruction.\n",
sysctl_speed_limit_max); sysctl_speed_limit_max);
...@@ -3318,14 +3303,16 @@ static void md_do_sync(mddev_t *mddev) ...@@ -3318,14 +3303,16 @@ static void md_do_sync(mddev_t *mddev)
*/ */
window = 32*(PAGE_SIZE/512); window = 32*(PAGE_SIZE/512);
printk(KERN_INFO "md: using %dk window, over a total of %d blocks.\n", printk(KERN_INFO "md: using %dk window, over a total of %d blocks.\n",
window/2,max_sectors/2); window/2,max_sectors/2);
atomic_set(&mddev->recovery_active, 0); atomic_set(&mddev->recovery_active, 0);
init_waitqueue_head(&mddev->recovery_wait); init_waitqueue_head(&mddev->recovery_wait);
last_check = 0; last_check = 0;
if (j) if (j)
printk(KERN_INFO "md: resuming recovery of md%d from checkpoint.\n", mdidx(mddev)); printk(KERN_INFO
"md: resuming recovery of md%d from checkpoint.\n",
mdidx(mddev));
while (j < max_sectors) { while (j < max_sectors) {
int sectors; int sectors;
...@@ -3367,7 +3354,8 @@ static void md_do_sync(mddev_t *mddev) ...@@ -3367,7 +3354,8 @@ static void md_do_sync(mddev_t *mddev)
/* /*
* got a signal, exit. * got a signal, exit.
*/ */
printk(KERN_INFO "md: md_do_sync() got signal ... exiting\n"); printk(KERN_INFO
"md: md_do_sync() got signal ... exiting\n");
flush_signals(current); flush_signals(current);
set_bit(MD_RECOVERY_INTR, &mddev->recovery); set_bit(MD_RECOVERY_INTR, &mddev->recovery);
goto out; goto out;
...@@ -3408,7 +3396,9 @@ static void md_do_sync(mddev_t *mddev) ...@@ -3408,7 +3396,9 @@ static void md_do_sync(mddev_t *mddev)
mddev->curr_resync > 2 && mddev->curr_resync > 2 &&
mddev->curr_resync > mddev->recovery_cp) { mddev->curr_resync > mddev->recovery_cp) {
if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
printk(KERN_INFO "md: checkpointing recovery of md%d.\n", mdidx(mddev)); printk(KERN_INFO
"md: checkpointing recovery of md%d.\n",
mdidx(mddev));
mddev->recovery_cp = mddev->curr_resync; mddev->recovery_cp = mddev->curr_resync;
} else } else
mddev->recovery_cp = MaxSector; mddev->recovery_cp = MaxSector;
...@@ -3526,7 +3516,9 @@ void md_check_recovery(mddev_t *mddev) ...@@ -3526,7 +3516,9 @@ void md_check_recovery(mddev_t *mddev)
mddev, mddev,
"md%d_resync"); "md%d_resync");
if (!mddev->sync_thread) { if (!mddev->sync_thread) {
printk(KERN_ERR "md%d: could not start resync thread...\n", mdidx(mddev)); printk(KERN_ERR "md%d: could not start resync"
" thread...\n",
mdidx(mddev));
/* leave the spares where they are, it shouldn't hurt */ /* leave the spares where they are, it shouldn't hurt */
mddev->recovery = 0; mddev->recovery = 0;
} else { } else {
...@@ -3590,7 +3582,8 @@ int __init md_init(void) ...@@ -3590,7 +3582,8 @@ int __init md_init(void)
{ {
int minor; int minor;
printk(KERN_INFO "md: md driver %d.%d.%d MAX_MD_DEVS=%d, MD_SB_DISKS=%d\n", printk(KERN_INFO "md: md driver %d.%d.%d MAX_MD_DEVS=%d,"
" MD_SB_DISKS=%d\n",
MD_MAJOR_VERSION, MD_MINOR_VERSION, MD_MAJOR_VERSION, MD_MINOR_VERSION,
MD_PATCHLEVEL_VERSION, MAX_MD_DEVS, MD_SB_DISKS); MD_PATCHLEVEL_VERSION, MAX_MD_DEVS, MD_SB_DISKS);
......
...@@ -78,7 +78,7 @@ static int multipath_map (mddev_t *mddev, mdk_rdev_t **rdevp) ...@@ -78,7 +78,7 @@ static int multipath_map (mddev_t *mddev, mdk_rdev_t **rdevp)
} }
spin_unlock_irq(&conf->device_lock); spin_unlock_irq(&conf->device_lock);
printk (KERN_ERR "multipath_map(): no more operational IO paths?\n"); printk(KERN_ERR "multipath_map(): no more operational IO paths?\n");
return (-1); return (-1);
} }
...@@ -130,7 +130,8 @@ int multipath_end_request(struct bio *bio, unsigned int bytes_done, int error) ...@@ -130,7 +130,8 @@ int multipath_end_request(struct bio *bio, unsigned int bytes_done, int error)
*/ */
md_error (mp_bh->mddev, rdev); md_error (mp_bh->mddev, rdev);
printk(KERN_ERR "multipath: %s: rescheduling sector %llu\n", printk(KERN_ERR "multipath: %s: rescheduling sector %llu\n",
bdev_partition_name(rdev->bdev), (unsigned long long)bio->bi_sector); bdev_partition_name(rdev->bdev),
(unsigned long long)bio->bi_sector);
multipath_reschedule_retry(mp_bh); multipath_reschedule_retry(mp_bh);
} }
atomic_dec(&rdev->nr_pending); atomic_dec(&rdev->nr_pending);
...@@ -198,16 +199,6 @@ static void multipath_status (struct seq_file *seq, mddev_t *mddev) ...@@ -198,16 +199,6 @@ static void multipath_status (struct seq_file *seq, mddev_t *mddev)
seq_printf (seq, "]"); seq_printf (seq, "]");
} }
#define LAST_DISK KERN_ALERT \
"multipath: only one IO path left and IO error.\n"
#define NO_SPARE_DISK KERN_ALERT \
"multipath: no spare IO path left!\n"
#define DISK_FAILED KERN_ALERT \
"multipath: IO failure on %s, disabling IO path. \n" \
" Operation continuing on %d IO paths.\n"
/* /*
* Careful, this can execute in IRQ contexts as well! * Careful, this can execute in IRQ contexts as well!
...@@ -222,7 +213,8 @@ static void multipath_error (mddev_t *mddev, mdk_rdev_t *rdev) ...@@ -222,7 +213,8 @@ static void multipath_error (mddev_t *mddev, mdk_rdev_t *rdev)
* first check if this is a queued request for a device * first check if this is a queued request for a device
* which has just failed. * which has just failed.
*/ */
printk (LAST_DISK); printk(KERN_ALERT
"multipath: only one IO path left and IO error.\n");
/* leave it active... it's all we have */ /* leave it active... it's all we have */
} else { } else {
/* /*
...@@ -233,17 +225,15 @@ static void multipath_error (mddev_t *mddev, mdk_rdev_t *rdev) ...@@ -233,17 +225,15 @@ static void multipath_error (mddev_t *mddev, mdk_rdev_t *rdev)
rdev->faulty = 1; rdev->faulty = 1;
mddev->sb_dirty = 1; mddev->sb_dirty = 1;
conf->working_disks--; conf->working_disks--;
printk (DISK_FAILED, bdev_partition_name (rdev->bdev), printk(KERN_ALERT "multipath: IO failure on %s,"
" disabling IO path. \n Operation continuing"
" on %d IO paths.\n",
bdev_partition_name (rdev->bdev),
conf->working_disks); conf->working_disks);
} }
} }
} }
#undef LAST_DISK
#undef NO_SPARE_DISK
#undef DISK_FAILED
static void print_multipath_conf (multipath_conf_t *conf) static void print_multipath_conf (multipath_conf_t *conf)
{ {
int i; int i;
...@@ -302,7 +292,7 @@ static int multipath_remove_disk(mddev_t *mddev, int number) ...@@ -302,7 +292,7 @@ static int multipath_remove_disk(mddev_t *mddev, int number)
if (p->rdev) { if (p->rdev) {
if (p->rdev->in_sync || if (p->rdev->in_sync ||
atomic_read(&p->rdev->nr_pending)) { atomic_read(&p->rdev->nr_pending)) {
printk(KERN_ERR "hot-remove-disk, slot %d is identified but is still operational!\n", number); printk(KERN_ERR "hot-remove-disk, slot %d is identified" " but is still operational!\n", number);
err = -EBUSY; err = -EBUSY;
goto abort; goto abort;
} }
...@@ -318,11 +308,7 @@ static int multipath_remove_disk(mddev_t *mddev, int number) ...@@ -318,11 +308,7 @@ static int multipath_remove_disk(mddev_t *mddev, int number)
return err; return err;
} }
#define IO_ERROR KERN_ALERT \
"multipath: %s: unrecoverable IO read error for block %llu\n"
#define REDIRECT_SECTOR KERN_ERR \
"multipath: %s: redirecting sector %llu to another IO path\n"
/* /*
* This is a kernel thread which: * This is a kernel thread which:
...@@ -354,59 +340,22 @@ static void multipathd (mddev_t *mddev) ...@@ -354,59 +340,22 @@ static void multipathd (mddev_t *mddev)
rdev = NULL; rdev = NULL;
if (multipath_map (mddev, &rdev)<0) { if (multipath_map (mddev, &rdev)<0) {
printk(IO_ERROR, printk(KERN_ALERT "multipath: %s: unrecoverable IO read"
bdev_partition_name(bio->bi_bdev), (unsigned long long)bio->bi_sector); " error for block %llu\n",
bdev_partition_name(bio->bi_bdev),
(unsigned long long)bio->bi_sector);
multipath_end_bh_io(mp_bh, 0); multipath_end_bh_io(mp_bh, 0);
} else { } else {
printk(REDIRECT_SECTOR, printk(KERN_ERR "multipath: %s: redirecting sector %llu"
bdev_partition_name(bio->bi_bdev), (unsigned long long)bio->bi_sector); " to another IO path\n",
bdev_partition_name(bio->bi_bdev),
(unsigned long long)bio->bi_sector);
bio->bi_bdev = rdev->bdev; bio->bi_bdev = rdev->bdev;
generic_make_request(bio); generic_make_request(bio);
} }
} }
spin_unlock_irqrestore(&retry_list_lock, flags); spin_unlock_irqrestore(&retry_list_lock, flags);
} }
#undef IO_ERROR
#undef REDIRECT_SECTOR
#define INVALID_LEVEL KERN_WARNING \
"multipath: md%d: raid level not set to multipath IO (%d)\n"
#define NO_SB KERN_ERR \
"multipath: disabled IO path %s (couldn't access raid superblock)\n"
#define ERRORS KERN_ERR \
"multipath: disabled IO path %s (errors detected)\n"
#define NOT_IN_SYNC KERN_ERR \
"multipath: making IO path %s a spare path (not in sync)\n"
#define INCONSISTENT KERN_ERR \
"multipath: disabled IO path %s (inconsistent descriptor)\n"
#define ALREADY_RUNNING KERN_ERR \
"multipath: disabled IO path %s (multipath %d already operational)\n"
#define OPERATIONAL KERN_INFO \
"multipath: device %s operational as IO path %d\n"
#define MEM_ERROR KERN_ERR \
"multipath: couldn't allocate memory for md%d\n"
#define SPARE KERN_INFO \
"multipath: spare IO path %s\n"
#define NONE_OPERATIONAL KERN_ERR \
"multipath: no operational IO paths for md%d\n"
#define SB_DIFFERENCES KERN_ERR \
"multipath: detected IO path differences!\n"
#define ARRAY_IS_ACTIVE KERN_INFO \
"multipath: array md%d active with %d out of %d IO paths\n"
#define THREAD_ERROR KERN_ERR \
"multipath: couldn't allocate thread for md%d\n"
static int multipath_run (mddev_t *mddev) static int multipath_run (mddev_t *mddev)
{ {
...@@ -416,10 +365,9 @@ static int multipath_run (mddev_t *mddev) ...@@ -416,10 +365,9 @@ static int multipath_run (mddev_t *mddev)
mdk_rdev_t *rdev; mdk_rdev_t *rdev;
struct list_head *tmp; struct list_head *tmp;
MOD_INC_USE_COUNT;
if (mddev->level != LEVEL_MULTIPATH) { if (mddev->level != LEVEL_MULTIPATH) {
printk(INVALID_LEVEL, mdidx(mddev), mddev->level); printk("multipath: md%d: raid level not set to multipath IO (%d)\n",
mdidx(mddev), mddev->level);
goto out; goto out;
} }
/* /*
...@@ -431,7 +379,9 @@ static int multipath_run (mddev_t *mddev) ...@@ -431,7 +379,9 @@ static int multipath_run (mddev_t *mddev)
conf = kmalloc(sizeof(multipath_conf_t), GFP_KERNEL); conf = kmalloc(sizeof(multipath_conf_t), GFP_KERNEL);
mddev->private = conf; mddev->private = conf;
if (!conf) { if (!conf) {
printk(MEM_ERROR, mdidx(mddev)); printk(KERN_ERR
"multipath: couldn't allocate memory for md%d\n",
mdidx(mddev));
goto out; goto out;
} }
memset(conf, 0, sizeof(*conf)); memset(conf, 0, sizeof(*conf));
...@@ -455,7 +405,8 @@ static int multipath_run (mddev_t *mddev) ...@@ -455,7 +405,8 @@ static int multipath_run (mddev_t *mddev)
conf->device_lock = SPIN_LOCK_UNLOCKED; conf->device_lock = SPIN_LOCK_UNLOCKED;
if (!conf->working_disks) { if (!conf->working_disks) {
printk(NONE_OPERATIONAL, mdidx(mddev)); printk(KERN_ERR "multipath: no operational IO paths for md%d\n",
mdidx(mddev));
goto out_free_conf; goto out_free_conf;
} }
mddev->degraded = conf->raid_disks = conf->working_disks; mddev->degraded = conf->raid_disks = conf->working_disks;
...@@ -464,7 +415,9 @@ static int multipath_run (mddev_t *mddev) ...@@ -464,7 +415,9 @@ static int multipath_run (mddev_t *mddev)
mp_pool_alloc, mp_pool_free, mp_pool_alloc, mp_pool_free,
NULL); NULL);
if (conf->pool == NULL) { if (conf->pool == NULL) {
printk(MEM_ERROR, mdidx(mddev)); printk(KERN_ERR
"multipath: couldn't allocate memory for md%d\n",
mdidx(mddev));
goto out_free_conf; goto out_free_conf;
} }
...@@ -473,13 +426,15 @@ static int multipath_run (mddev_t *mddev) ...@@ -473,13 +426,15 @@ static int multipath_run (mddev_t *mddev)
mddev->thread = md_register_thread(multipathd, mddev, name); mddev->thread = md_register_thread(multipathd, mddev, name);
if (!mddev->thread) { if (!mddev->thread) {
printk(THREAD_ERROR, mdidx(mddev)); printk(KERN_ERR "multipath: couldn't allocate thread"
" for md%d\n", mdidx(mddev));
goto out_free_conf; goto out_free_conf;
} }
} }
printk(ARRAY_IS_ACTIVE, mdidx(mddev), conf->working_disks, printk(KERN_INFO
mddev->raid_disks); "multipath: array md%d active with %d out of %d IO paths\n",
mdidx(mddev), conf->working_disks, mddev->raid_disks);
/* /*
* Ok, everything is just fine now * Ok, everything is just fine now
*/ */
...@@ -491,21 +446,9 @@ static int multipath_run (mddev_t *mddev) ...@@ -491,21 +446,9 @@ static int multipath_run (mddev_t *mddev)
kfree(conf); kfree(conf);
mddev->private = NULL; mddev->private = NULL;
out: out:
MOD_DEC_USE_COUNT;
return -EIO; return -EIO;
} }
#undef INVALID_LEVEL
#undef NO_SB
#undef ERRORS
#undef NOT_IN_SYNC
#undef INCONSISTENT
#undef ALREADY_RUNNING
#undef OPERATIONAL
#undef SPARE
#undef NONE_OPERATIONAL
#undef SB_DIFFERENCES
#undef ARRAY_IS_ACTIVE
static int multipath_stop (mddev_t *mddev) static int multipath_stop (mddev_t *mddev)
{ {
...@@ -515,13 +458,13 @@ static int multipath_stop (mddev_t *mddev) ...@@ -515,13 +458,13 @@ static int multipath_stop (mddev_t *mddev)
mempool_destroy(conf->pool); mempool_destroy(conf->pool);
kfree(conf); kfree(conf);
mddev->private = NULL; mddev->private = NULL;
MOD_DEC_USE_COUNT;
return 0; return 0;
} }
static mdk_personality_t multipath_personality= static mdk_personality_t multipath_personality=
{ {
.name = "multipath", .name = "multipath",
.owner = THIS_MODULE,
.make_request = multipath_make_request, .make_request = multipath_make_request,
.run = multipath_run, .run = multipath_run,
.stop = multipath_stop, .stop = multipath_stop,
......
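The multipath hunks above make two recurring cleanups that the raid0, raid1 and raid5 diffs below repeat: the KERN_*-carrying message macros (LAST_DISK, DISK_FAILED, IO_ERROR, ...) are folded back into plain printk() calls with the level and format inline, and the manual MOD_INC_USE_COUNT/MOD_DEC_USE_COUNT calls in run()/stop() are dropped in favour of an .owner = THIS_MODULE field in the personality, so the md core can hold a module reference while the personality is in use (presumably via try_module_get()/module_put()). A minimal sketch of the resulting pattern, assuming the 2.5 md headers rather than a standalone program; the example_* names are illustrative only:

	static int example_run(mddev_t *mddev)
	{
		/* No MOD_INC_USE_COUNT: the core pins example_personality.owner
		 * for as long as the array is running. */
		if (mddev->level != LEVEL_MULTIPATH) {
			printk(KERN_WARNING
			       "multipath: md%d: raid level not set to multipath IO (%d)\n",
			       mdidx(mddev), mddev->level);
			return -EIO;
		}
		return 0;
	}

	static int example_stop(mddev_t *mddev)
	{
		/* No MOD_DEC_USE_COUNT on the way out either. */
		return 0;
	}

	static mdk_personality_t example_personality = {
		.name	= "multipath",
		.owner	= THIS_MODULE,
		.run	= example_run,
		.stop	= example_stop,
	};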
...@@ -43,12 +43,15 @@ static int create_strip_zones (mddev_t *mddev) ...@@ -43,12 +43,15 @@ static int create_strip_zones (mddev_t *mddev)
conf->nr_strip_zones = 0; conf->nr_strip_zones = 0;
ITERATE_RDEV(mddev,rdev1,tmp1) { ITERATE_RDEV(mddev,rdev1,tmp1) {
printk("raid0: looking at %s\n", bdev_partition_name(rdev1->bdev)); printk("raid0: looking at %s\n",
bdev_partition_name(rdev1->bdev));
c = 0; c = 0;
ITERATE_RDEV(mddev,rdev2,tmp2) { ITERATE_RDEV(mddev,rdev2,tmp2) {
printk("raid0: comparing %s(%llu) with %s(%llu)\n", printk("raid0: comparing %s(%llu) with %s(%llu)\n",
bdev_partition_name(rdev1->bdev), (unsigned long long)rdev1->size, bdev_partition_name(rdev1->bdev),
bdev_partition_name(rdev2->bdev), (unsigned long long)rdev2->size); (unsigned long long)rdev1->size,
bdev_partition_name(rdev2->bdev),
(unsigned long long)rdev2->size);
if (rdev2 == rdev1) { if (rdev2 == rdev1) {
printk("raid0: END\n"); printk("raid0: END\n");
break; break;
...@@ -94,7 +97,8 @@ static int create_strip_zones (mddev_t *mddev) ...@@ -94,7 +97,8 @@ static int create_strip_zones (mddev_t *mddev)
goto abort; goto abort;
} }
if (zone->dev[j]) { if (zone->dev[j]) {
printk("raid0: multiple devices for %d - aborting!\n", j); printk("raid0: multiple devices for %d - aborting!\n",
j);
goto abort; goto abort;
} }
zone->dev[j] = rdev1; zone->dev[j] = rdev1;
...@@ -103,8 +107,8 @@ static int create_strip_zones (mddev_t *mddev) ...@@ -103,8 +107,8 @@ static int create_strip_zones (mddev_t *mddev)
cnt++; cnt++;
} }
if (cnt != mddev->raid_disks) { if (cnt != mddev->raid_disks) {
printk("raid0: too few disks (%d of %d) - aborting!\n", cnt, printk("raid0: too few disks (%d of %d) - aborting!\n",
mddev->raid_disks); cnt, mddev->raid_disks);
goto abort; goto abort;
} }
zone->nb_dev = cnt; zone->nb_dev = cnt;
...@@ -136,7 +140,7 @@ static int create_strip_zones (mddev_t *mddev) ...@@ -136,7 +140,7 @@ static int create_strip_zones (mddev_t *mddev)
if (!smallest || (rdev->size <smallest->size)) { if (!smallest || (rdev->size <smallest->size)) {
smallest = rdev; smallest = rdev;
printk(" (%llu) is smallest!.\n", printk(" (%llu) is smallest!.\n",
(unsigned long long)rdev->size); (unsigned long long)rdev->size);
} }
} else } else
printk(" nope.\n"); printk(" nope.\n");
...@@ -144,7 +148,8 @@ static int create_strip_zones (mddev_t *mddev) ...@@ -144,7 +148,8 @@ static int create_strip_zones (mddev_t *mddev)
zone->nb_dev = c; zone->nb_dev = c;
zone->size = (smallest->size - current_offset) * c; zone->size = (smallest->size - current_offset) * c;
printk("raid0: zone->nb_dev: %d, size: %llu\n",zone->nb_dev, (unsigned long long)zone->size); printk("raid0: zone->nb_dev: %d, size: %llu\n",
zone->nb_dev, (unsigned long long)zone->size);
if (!conf->smallest || (zone->size < conf->smallest->size)) if (!conf->smallest || (zone->size < conf->smallest->size))
conf->smallest = zone; conf->smallest = zone;
...@@ -153,7 +158,8 @@ static int create_strip_zones (mddev_t *mddev) ...@@ -153,7 +158,8 @@ static int create_strip_zones (mddev_t *mddev)
curr_zone_offset += zone->size; curr_zone_offset += zone->size;
current_offset = smallest->size; current_offset = smallest->size;
printk("raid0: current zone offset: %llu\n", (unsigned long long)current_offset); printk("raid0: current zone offset: %llu\n",
(unsigned long long)current_offset);
} }
printk("raid0: done.\n"); printk("raid0: done.\n");
return 0; return 0;
...@@ -191,8 +197,6 @@ static int raid0_run (mddev_t *mddev) ...@@ -191,8 +197,6 @@ static int raid0_run (mddev_t *mddev)
s64 size; s64 size;
raid0_conf_t *conf; raid0_conf_t *conf;
MOD_INC_USE_COUNT;
conf = vmalloc(sizeof (raid0_conf_t)); conf = vmalloc(sizeof (raid0_conf_t));
if (!conf) if (!conf)
goto out; goto out;
...@@ -201,8 +205,10 @@ static int raid0_run (mddev_t *mddev) ...@@ -201,8 +205,10 @@ static int raid0_run (mddev_t *mddev)
if (create_strip_zones (mddev)) if (create_strip_zones (mddev))
goto out_free_conf; goto out_free_conf;
printk("raid0 : md_size is %llu blocks.\n", (unsigned long long)md_size[mdidx(mddev)]); printk("raid0 : md_size is %llu blocks.\n",
printk("raid0 : conf->smallest->size is %llu blocks.\n", (unsigned long long)conf->smallest->size); (unsigned long long)md_size[mdidx(mddev)]);
printk("raid0 : conf->smallest->size is %llu blocks.\n",
(unsigned long long)conf->smallest->size);
{ {
#if __GNUC__ < 3 #if __GNUC__ < 3
volatile volatile
...@@ -267,7 +273,6 @@ static int raid0_run (mddev_t *mddev) ...@@ -267,7 +273,6 @@ static int raid0_run (mddev_t *mddev)
vfree(conf); vfree(conf);
mddev->private = NULL; mddev->private = NULL;
out: out:
MOD_DEC_USE_COUNT;
return 1; return 1;
} }
...@@ -282,7 +287,6 @@ static int raid0_stop (mddev_t *mddev) ...@@ -282,7 +287,6 @@ static int raid0_stop (mddev_t *mddev)
vfree (conf); vfree (conf);
mddev->private = NULL; mddev->private = NULL;
MOD_DEC_USE_COUNT;
return 0; return 0;
} }
...@@ -357,16 +361,21 @@ static int raid0_make_request (request_queue_t *q, struct bio *bio) ...@@ -357,16 +361,21 @@ static int raid0_make_request (request_queue_t *q, struct bio *bio)
return 1; return 1;
bad_map: bad_map:
printk ("raid0_make_request bug: can't convert block across chunks or bigger than %dk %llu %d\n", chunk_size, (unsigned long long)bio->bi_sector, bio->bi_size >> 10); printk("raid0_make_request bug: can't convert block across chunks"
" or bigger than %dk %llu %d\n", chunk_size,
(unsigned long long)bio->bi_sector, bio->bi_size >> 10);
goto outerr; goto outerr;
bad_hash: bad_hash:
printk("raid0_make_request bug: hash==NULL for block %llu\n", (unsigned long long)block); printk("raid0_make_request bug: hash==NULL for block %llu\n",
(unsigned long long)block);
goto outerr; goto outerr;
bad_zone0: bad_zone0:
printk ("raid0_make_request bug: hash->zone0==NULL for block %llu\n", (unsigned long long)block); printk("raid0_make_request bug: hash->zone0==NULL for block %llu\n",
(unsigned long long)block);
goto outerr; goto outerr;
bad_zone1: bad_zone1:
printk ("raid0_make_request bug: hash->zone1==NULL for block %llu\n", (unsigned long long)block); printk("raid0_make_request bug: hash->zone1==NULL for block %llu\n",
(unsigned long long)block);
outerr: outerr:
bio_io_error(bio, bio->bi_size); bio_io_error(bio, bio->bi_size);
return 0; return 0;
...@@ -411,6 +420,7 @@ static void raid0_status (struct seq_file *seq, mddev_t *mddev) ...@@ -411,6 +420,7 @@ static void raid0_status (struct seq_file *seq, mddev_t *mddev)
static mdk_personality_t raid0_personality= static mdk_personality_t raid0_personality=
{ {
.name = "raid0", .name = "raid0",
.owner = THIS_MODULE,
.make_request = raid0_make_request, .make_request = raid0_make_request,
.run = raid0_run, .run = raid0_run,
.stop = raid0_stop, .stop = raid0_stop,
......
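Nearly all of the raid0 changes above are line-length cleanups: a long printk() format string is split across source lines using C's adjacent-string-literal concatenation, so the message the kernel emits is unchanged. A tiny standalone illustration (the values are made up):

	#include <stdio.h>

	int main(void)
	{
		int chunk_kb = 64;
		unsigned long long sector = 12345ULL;

		/* Adjacent string literals are concatenated at compile time,
		 * so this prints one single-line message. */
		printf("raid0_make_request bug: can't convert block across chunks"
		       " or bigger than %dk %llu\n", chunk_kb, sector);
		return 0;
	}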
...@@ -217,7 +217,7 @@ static int map(mddev_t *mddev, mdk_rdev_t **rdevp) ...@@ -217,7 +217,7 @@ static int map(mddev_t *mddev, mdk_rdev_t **rdevp)
} }
spin_unlock_irq(&conf->device_lock); spin_unlock_irq(&conf->device_lock);
printk (KERN_ERR "raid1_map(): huh, no more operational devices?\n"); printk(KERN_ERR "raid1_map(): huh, no more operational devices?\n");
return -1; return -1;
} }
...@@ -305,7 +305,7 @@ static int end_request(struct bio *bio, unsigned int bytes_done, int error) ...@@ -305,7 +305,7 @@ static int end_request(struct bio *bio, unsigned int bytes_done, int error)
* oops, read error: * oops, read error:
*/ */
printk(KERN_ERR "raid1: %s: rescheduling sector %llu\n", printk(KERN_ERR "raid1: %s: rescheduling sector %llu\n",
bdev_partition_name(conf->mirrors[mirror].rdev->bdev), (unsigned long long)r1_bio->sector); bdev_partition_name(conf->mirrors[mirror].rdev->bdev), (unsigned long long)r1_bio->sector);
reschedule_retry(r1_bio); reschedule_retry(r1_bio);
} }
} else { } else {
...@@ -584,22 +584,6 @@ static void status(struct seq_file *seq, mddev_t *mddev) ...@@ -584,22 +584,6 @@ static void status(struct seq_file *seq, mddev_t *mddev)
seq_printf(seq, "]"); seq_printf(seq, "]");
} }
#define LAST_DISK KERN_ALERT \
"raid1: only one disk left and IO error.\n"
#define NO_SPARE_DISK KERN_ALERT \
"raid1: no spare disk left, degrading mirror level by one.\n"
#define DISK_FAILED KERN_ALERT \
"raid1: Disk failure on %s, disabling device. \n" \
" Operation continuing on %d devices\n"
#define START_SYNCING KERN_ALERT \
"raid1: start syncing spare disk.\n"
#define ALREADY_SYNCING KERN_INFO \
"raid1: syncing already in progress.\n"
static void error(mddev_t *mddev, mdk_rdev_t *rdev) static void error(mddev_t *mddev, mdk_rdev_t *rdev)
{ {
...@@ -629,7 +613,9 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev) ...@@ -629,7 +613,9 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev)
rdev->in_sync = 0; rdev->in_sync = 0;
rdev->faulty = 1; rdev->faulty = 1;
mddev->sb_dirty = 1; mddev->sb_dirty = 1;
printk(DISK_FAILED, bdev_partition_name(rdev->bdev), conf->working_disks); printk(KERN_ALERT "raid1: Disk failure on %s, disabling device. \n"
" Operation continuing on %d devices\n",
bdev_partition_name(rdev->bdev), conf->working_disks);
} }
static void print_conf(conf_t *conf) static void print_conf(conf_t *conf)
...@@ -643,14 +629,14 @@ static void print_conf(conf_t *conf) ...@@ -643,14 +629,14 @@ static void print_conf(conf_t *conf)
return; return;
} }
printk(" --- wd:%d rd:%d\n", conf->working_disks, printk(" --- wd:%d rd:%d\n", conf->working_disks,
conf->raid_disks); conf->raid_disks);
for (i = 0; i < conf->raid_disks; i++) { for (i = 0; i < conf->raid_disks; i++) {
tmp = conf->mirrors + i; tmp = conf->mirrors + i;
if (tmp->rdev) if (tmp->rdev)
printk(" disk %d, wo:%d, o:%d, dev:%s\n", printk(" disk %d, wo:%d, o:%d, dev:%s\n",
i, !tmp->rdev->in_sync, !tmp->rdev->faulty, i, !tmp->rdev->in_sync, !tmp->rdev->faulty,
bdev_partition_name(tmp->rdev->bdev)); bdev_partition_name(tmp->rdev->bdev));
} }
} }
...@@ -743,11 +729,6 @@ static int raid1_remove_disk(mddev_t *mddev, int number) ...@@ -743,11 +729,6 @@ static int raid1_remove_disk(mddev_t *mddev, int number)
return err; return err;
} }
#define IO_ERROR KERN_ALERT \
"raid1: %s: unrecoverable I/O read error for block %llu\n"
#define REDIRECT_SECTOR KERN_ERR \
"raid1: %s: redirecting sector %llu to another mirror\n"
static int end_sync_read(struct bio *bio, unsigned int bytes_done, int error) static int end_sync_read(struct bio *bio, unsigned int bytes_done, int error)
{ {
...@@ -823,7 +804,10 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio) ...@@ -823,7 +804,10 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
* There is no point trying a read-for-reconstruct as * There is no point trying a read-for-reconstruct as
* reconstruct is about to be aborted * reconstruct is about to be aborted
*/ */
printk(IO_ERROR, bdev_partition_name(bio->bi_bdev), (unsigned long long)r1_bio->sector); printk(KERN_ALERT "raid1: %s: unrecoverable I/O read error"
" for block %llu\n",
bdev_partition_name(bio->bi_bdev),
(unsigned long long)r1_bio->sector);
md_done_sync(mddev, r1_bio->master_bio->bi_size >> 9, 0); md_done_sync(mddev, r1_bio->master_bio->bi_size >> 9, 0);
put_buf(r1_bio); put_buf(r1_bio);
return; return;
...@@ -874,7 +858,8 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio) ...@@ -874,7 +858,8 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
* Nowhere to write this to... I guess we * Nowhere to write this to... I guess we
* must be done * must be done
*/ */
printk(KERN_ALERT "raid1: sync aborting as there is nowhere to write sector %llu\n", printk(KERN_ALERT "raid1: sync aborting as there is nowhere"
" to write sector %llu\n",
(unsigned long long)r1_bio->sector); (unsigned long long)r1_bio->sector);
md_done_sync(mddev, r1_bio->master_bio->bi_size >> 9, 0); md_done_sync(mddev, r1_bio->master_bio->bi_size >> 9, 0);
put_buf(r1_bio); put_buf(r1_bio);
...@@ -928,12 +913,17 @@ static void raid1d(mddev_t *mddev) ...@@ -928,12 +913,17 @@ static void raid1d(mddev_t *mddev)
case READ: case READ:
case READA: case READA:
if (map(mddev, &rdev) == -1) { if (map(mddev, &rdev) == -1) {
printk(IO_ERROR, bdev_partition_name(bio->bi_bdev), (unsigned long long)r1_bio->sector); printk(KERN_ALERT "raid1: %s: unrecoverable I/O"
" read error for block %llu\n",
bdev_partition_name(bio->bi_bdev),
(unsigned long long)r1_bio->sector);
raid_end_bio_io(r1_bio, 0); raid_end_bio_io(r1_bio, 0);
break; break;
} }
printk(REDIRECT_SECTOR, printk(KERN_ERR "raid1: %s: redirecting sector %llu to"
bdev_partition_name(rdev->bdev), (unsigned long long)r1_bio->sector); " another mirror\n",
bdev_partition_name(rdev->bdev),
(unsigned long long)r1_bio->sector);
bio->bi_bdev = rdev->bdev; bio->bi_bdev = rdev->bdev;
bio->bi_sector = r1_bio->sector + rdev->data_offset; bio->bi_sector = r1_bio->sector + rdev->data_offset;
bio->bi_rw = r1_bio->cmd; bio->bi_rw = r1_bio->cmd;
...@@ -1063,45 +1053,6 @@ static int sync_request(mddev_t *mddev, sector_t sector_nr, int go_faster) ...@@ -1063,45 +1053,6 @@ static int sync_request(mddev_t *mddev, sector_t sector_nr, int go_faster)
return nr_sectors; return nr_sectors;
} }
#define INVALID_LEVEL KERN_WARNING \
"raid1: md%d: raid level not set to mirroring (%d)\n"
#define NO_SB KERN_ERR \
"raid1: disabled mirror %s (couldn't access raid superblock)\n"
#define ERRORS KERN_ERR \
"raid1: disabled mirror %s (errors detected)\n"
#define NOT_IN_SYNC KERN_ERR \
"raid1: disabled mirror %s (not in sync)\n"
#define INCONSISTENT KERN_ERR \
"raid1: disabled mirror %s (inconsistent descriptor)\n"
#define ALREADY_RUNNING KERN_ERR \
"raid1: disabled mirror %s (mirror %d already operational)\n"
#define OPERATIONAL KERN_INFO \
"raid1: device %s operational as mirror %d\n"
#define MEM_ERROR KERN_ERR \
"raid1: couldn't allocate memory for md%d\n"
#define SPARE KERN_INFO \
"raid1: spare disk %s\n"
#define NONE_OPERATIONAL KERN_ERR \
"raid1: no operational mirrors for md%d\n"
#define ARRAY_IS_ACTIVE KERN_INFO \
"raid1: raid set md%d active with %d out of %d mirrors\n"
#define THREAD_ERROR KERN_ERR \
"raid1: couldn't allocate thread for md%d\n"
#define START_RESYNC KERN_WARNING \
"raid1: raid set md%d not clean; reconstructing mirrors\n"
static int run(mddev_t *mddev) static int run(mddev_t *mddev)
{ {
conf_t *conf; conf_t *conf;
...@@ -1110,10 +1061,9 @@ static int run(mddev_t *mddev) ...@@ -1110,10 +1061,9 @@ static int run(mddev_t *mddev)
mdk_rdev_t *rdev; mdk_rdev_t *rdev;
struct list_head *tmp; struct list_head *tmp;
MOD_INC_USE_COUNT;
if (mddev->level != 1) { if (mddev->level != 1) {
printk(INVALID_LEVEL, mdidx(mddev), mddev->level); printk("raid1: md%d: raid level not set to mirroring (%d)\n",
mdidx(mddev), mddev->level);
goto out; goto out;
} }
/* /*
...@@ -1124,7 +1074,8 @@ static int run(mddev_t *mddev) ...@@ -1124,7 +1074,8 @@ static int run(mddev_t *mddev)
conf = kmalloc(sizeof(conf_t), GFP_KERNEL); conf = kmalloc(sizeof(conf_t), GFP_KERNEL);
mddev->private = conf; mddev->private = conf;
if (!conf) { if (!conf) {
printk(MEM_ERROR, mdidx(mddev)); printk(KERN_ERR "raid1: couldn't allocate memory for md%d\n",
mdidx(mddev));
goto out; goto out;
} }
memset(conf, 0, sizeof(*conf)); memset(conf, 0, sizeof(*conf));
...@@ -1132,7 +1083,8 @@ static int run(mddev_t *mddev) ...@@ -1132,7 +1083,8 @@ static int run(mddev_t *mddev)
conf->r1bio_pool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc, conf->r1bio_pool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc,
r1bio_pool_free, NULL); r1bio_pool_free, NULL);
if (!conf->r1bio_pool) { if (!conf->r1bio_pool) {
printk(MEM_ERROR, mdidx(mddev)); printk(KERN_ERR "raid1: couldn't allocate memory for md%d\n",
mdidx(mddev));
goto out; goto out;
} }
...@@ -1160,7 +1112,8 @@ static int run(mddev_t *mddev) ...@@ -1160,7 +1112,8 @@ static int run(mddev_t *mddev)
init_waitqueue_head(&conf->wait_resume); init_waitqueue_head(&conf->wait_resume);
if (!conf->working_disks) { if (!conf->working_disks) {
printk(NONE_OPERATIONAL, mdidx(mddev)); printk(KERN_ERR "raid1: no operational mirrors for md%d\n",
mdidx(mddev));
goto out_free_conf; goto out_free_conf;
} }
...@@ -1190,12 +1143,16 @@ static int run(mddev_t *mddev) ...@@ -1190,12 +1143,16 @@ static int run(mddev_t *mddev)
{ {
mddev->thread = md_register_thread(raid1d, mddev, "md%d_raid1"); mddev->thread = md_register_thread(raid1d, mddev, "md%d_raid1");
if (!mddev->thread) { if (!mddev->thread) {
printk(THREAD_ERROR, mdidx(mddev)); printk(KERN_ERR
"raid1: couldn't allocate thread for md%d\n",
mdidx(mddev));
goto out_free_conf; goto out_free_conf;
} }
} }
printk(KERN_INFO
printk(ARRAY_IS_ACTIVE, mdidx(mddev), mddev->raid_disks - mddev->degraded, mddev->raid_disks); "raid1: raid set md%d active with %d out of %d mirrors\n",
mdidx(mddev), mddev->raid_disks - mddev->degraded,
mddev->raid_disks);
/* /*
* Ok, everything is just fine now * Ok, everything is just fine now
*/ */
...@@ -1207,7 +1164,6 @@ static int run(mddev_t *mddev) ...@@ -1207,7 +1164,6 @@ static int run(mddev_t *mddev)
kfree(conf); kfree(conf);
mddev->private = NULL; mddev->private = NULL;
out: out:
MOD_DEC_USE_COUNT;
return -EIO; return -EIO;
} }
...@@ -1221,13 +1177,13 @@ static int stop(mddev_t *mddev) ...@@ -1221,13 +1177,13 @@ static int stop(mddev_t *mddev)
mempool_destroy(conf->r1bio_pool); mempool_destroy(conf->r1bio_pool);
kfree(conf); kfree(conf);
mddev->private = NULL; mddev->private = NULL;
MOD_DEC_USE_COUNT;
return 0; return 0;
} }
static mdk_personality_t raid1_personality = static mdk_personality_t raid1_personality =
{ {
.name = "raid1", .name = "raid1",
.owner = THIS_MODULE,
.make_request = make_request, .make_request = make_request,
.run = run, .run = run,
.stop = stop, .stop = stop,
......
...@@ -182,7 +182,8 @@ static inline void init_stripe(struct stripe_head *sh, unsigned long sector, int ...@@ -182,7 +182,8 @@ static inline void init_stripe(struct stripe_head *sh, unsigned long sector, int
BUG(); BUG();
CHECK_DEVLOCK(); CHECK_DEVLOCK();
PRINTK("init_stripe called, stripe %llu\n", (unsigned long long)sh->sector); PRINTK("init_stripe called, stripe %llu\n",
(unsigned long long)sh->sector);
remove_hash(sh); remove_hash(sh);
...@@ -338,7 +339,9 @@ static int raid5_end_read_request (struct bio * bi, unsigned int bytes_done, ...@@ -338,7 +339,9 @@ static int raid5_end_read_request (struct bio * bi, unsigned int bytes_done,
if (bi == &sh->dev[i].req) if (bi == &sh->dev[i].req)
break; break;
PRINTK("end_read_request %llu/%d, count: %d, uptodate %d.\n", (unsigned long long)sh->sector, i, atomic_read(&sh->count), uptodate); PRINTK("end_read_request %llu/%d, count: %d, uptodate %d.\n",
(unsigned long long)sh->sector, i, atomic_read(&sh->count),
uptodate);
if (i == disks) { if (i == disks) {
BUG(); BUG();
return 0; return 0;
...@@ -409,7 +412,9 @@ static int raid5_end_write_request (struct bio *bi, unsigned int bytes_done, ...@@ -409,7 +412,9 @@ static int raid5_end_write_request (struct bio *bi, unsigned int bytes_done,
if (bi == &sh->dev[i].req) if (bi == &sh->dev[i].req)
break; break;
PRINTK("end_write_request %llu/%d, count %d, uptodate: %d.\n", (unsigned long long)sh->sector, i, atomic_read(&sh->count), uptodate); PRINTK("end_write_request %llu/%d, count %d, uptodate: %d.\n",
(unsigned long long)sh->sector, i, atomic_read(&sh->count),
uptodate);
if (i == disks) { if (i == disks) {
BUG(); BUG();
return 0; return 0;
...@@ -533,7 +538,8 @@ static unsigned long raid5_compute_sector(sector_t r_sector, unsigned int raid_d ...@@ -533,7 +538,8 @@ static unsigned long raid5_compute_sector(sector_t r_sector, unsigned int raid_d
*dd_idx = (*pd_idx + 1 + *dd_idx) % raid_disks; *dd_idx = (*pd_idx + 1 + *dd_idx) % raid_disks;
break; break;
default: default:
printk ("raid5: unsupported algorithm %d\n", conf->algorithm); printk("raid5: unsupported algorithm %d\n",
conf->algorithm);
} }
/* /*
...@@ -573,7 +579,8 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i) ...@@ -573,7 +579,8 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i)
i -= (sh->pd_idx + 1); i -= (sh->pd_idx + 1);
break; break;
default: default:
printk ("raid5: unsupported algorithm %d\n", conf->algorithm); printk("raid5: unsupported algorithm %d\n",
conf->algorithm);
} }
chunk_number = stripe * data_disks + i; chunk_number = stripe * data_disks + i;
...@@ -655,7 +662,8 @@ static void compute_block(struct stripe_head *sh, int dd_idx) ...@@ -655,7 +662,8 @@ static void compute_block(struct stripe_head *sh, int dd_idx)
int i, count, disks = conf->raid_disks; int i, count, disks = conf->raid_disks;
void *ptr[MAX_XOR_BLOCKS], *p; void *ptr[MAX_XOR_BLOCKS], *p;
PRINTK("compute_block, stripe %llu, idx %d\n", (unsigned long long)sh->sector, dd_idx); PRINTK("compute_block, stripe %llu, idx %d\n",
(unsigned long long)sh->sector, dd_idx);
ptr[0] = page_address(sh->dev[dd_idx].page); ptr[0] = page_address(sh->dev[dd_idx].page);
memset(ptr[0], 0, STRIPE_SIZE); memset(ptr[0], 0, STRIPE_SIZE);
...@@ -667,7 +675,9 @@ static void compute_block(struct stripe_head *sh, int dd_idx) ...@@ -667,7 +675,9 @@ static void compute_block(struct stripe_head *sh, int dd_idx)
if (test_bit(R5_UPTODATE, &sh->dev[i].flags)) if (test_bit(R5_UPTODATE, &sh->dev[i].flags))
ptr[count++] = p; ptr[count++] = p;
else else
printk("compute_block() %d, stripe %llu, %d not present\n", dd_idx, (unsigned long long)sh->sector, i); printk("compute_block() %d, stripe %llu, %d"
" not present\n", dd_idx,
(unsigned long long)sh->sector, i);
check_xor(); check_xor();
} }
...@@ -683,7 +693,8 @@ static void compute_parity(struct stripe_head *sh, int method) ...@@ -683,7 +693,8 @@ static void compute_parity(struct stripe_head *sh, int method)
void *ptr[MAX_XOR_BLOCKS]; void *ptr[MAX_XOR_BLOCKS];
struct bio *chosen; struct bio *chosen;
PRINTK("compute_parity, stripe %llu, method %d\n", (unsigned long long)sh->sector, method); PRINTK("compute_parity, stripe %llu, method %d\n",
(unsigned long long)sh->sector, method);
count = 1; count = 1;
ptr[0] = page_address(sh->dev[pd_idx].page); ptr[0] = page_address(sh->dev[pd_idx].page);
...@@ -768,7 +779,9 @@ static void add_stripe_bio (struct stripe_head *sh, struct bio *bi, int dd_idx, ...@@ -768,7 +779,9 @@ static void add_stripe_bio (struct stripe_head *sh, struct bio *bi, int dd_idx,
struct bio **bip; struct bio **bip;
raid5_conf_t *conf = sh->raid_conf; raid5_conf_t *conf = sh->raid_conf;
PRINTK("adding bh b#%llu to stripe s#%llu\n", (unsigned long long)bi->bi_sector, (unsigned long long)sh->sector); PRINTK("adding bh b#%llu to stripe s#%llu\n",
(unsigned long long)bi->bi_sector,
(unsigned long long)sh->sector);
spin_lock(&sh->lock); spin_lock(&sh->lock);
...@@ -789,7 +802,9 @@ static void add_stripe_bio (struct stripe_head *sh, struct bio *bi, int dd_idx, ...@@ -789,7 +802,9 @@ static void add_stripe_bio (struct stripe_head *sh, struct bio *bi, int dd_idx,
spin_unlock_irq(&conf->device_lock); spin_unlock_irq(&conf->device_lock);
spin_unlock(&sh->lock); spin_unlock(&sh->lock);
PRINTK("added bi b#%llu to stripe s#%llu, disk %d.\n", (unsigned long long)bi->bi_sector, (unsigned long long)sh->sector, dd_idx); PRINTK("added bi b#%llu to stripe s#%llu, disk %d.\n",
(unsigned long long)bi->bi_sector,
(unsigned long long)sh->sector, dd_idx);
if (forwrite) { if (forwrite) {
/* check if page is coverred */ /* check if page is coverred */
...@@ -838,7 +853,9 @@ static void handle_stripe(struct stripe_head *sh) ...@@ -838,7 +853,9 @@ static void handle_stripe(struct stripe_head *sh)
int failed_num=0; int failed_num=0;
struct r5dev *dev; struct r5dev *dev;
PRINTK("handling stripe %llu, cnt=%d, pd_idx=%d\n", (unsigned long long)sh->sector, atomic_read(&sh->count), sh->pd_idx); PRINTK("handling stripe %llu, cnt=%d, pd_idx=%d\n",
(unsigned long long)sh->sector, atomic_read(&sh->count),
sh->pd_idx);
spin_lock(&sh->lock); spin_lock(&sh->lock);
clear_bit(STRIPE_HANDLE, &sh->state); clear_bit(STRIPE_HANDLE, &sh->state);
...@@ -853,8 +870,8 @@ static void handle_stripe(struct stripe_head *sh) ...@@ -853,8 +870,8 @@ static void handle_stripe(struct stripe_head *sh)
clear_bit(R5_Insync, &dev->flags); clear_bit(R5_Insync, &dev->flags);
clear_bit(R5_Syncio, &dev->flags); clear_bit(R5_Syncio, &dev->flags);
PRINTK("check %d: state 0x%lx read %p write %p written %p\n", i, PRINTK("check %d: state 0x%lx read %p write %p written %p\n",
dev->flags, dev->toread, dev->towrite, dev->written); i, dev->flags, dev->toread, dev->towrite, dev->written);
/* maybe we can reply to a read */ /* maybe we can reply to a read */
if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread) { if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread) {
struct bio *rbi, *rbi2; struct bio *rbi, *rbi2;
...@@ -895,8 +912,9 @@ static void handle_stripe(struct stripe_head *sh) ...@@ -895,8 +912,9 @@ static void handle_stripe(struct stripe_head *sh)
} else } else
set_bit(R5_Insync, &dev->flags); set_bit(R5_Insync, &dev->flags);
} }
PRINTK("locked=%d uptodate=%d to_read=%d to_write=%d failed=%d failed_num=%d\n", PRINTK("locked=%d uptodate=%d to_read=%d"
locked, uptodate, to_read, to_write, failed, failed_num); " to_write=%d failed=%d failed_num=%d\n",
locked, uptodate, to_read, to_write, failed, failed_num);
/* check if the array has lost two devices and, if so, some requests might /* check if the array has lost two devices and, if so, some requests might
* need to be failed * need to be failed
*/ */
...@@ -1015,7 +1033,8 @@ static void handle_stripe(struct stripe_head *sh) ...@@ -1015,7 +1033,8 @@ static void handle_stripe(struct stripe_head *sh)
} }
#endif #endif
locked++; locked++;
PRINTK("Reading block %d (sync=%d)\n", i, syncing); PRINTK("Reading block %d (sync=%d)\n",
i, syncing);
if (syncing) if (syncing)
md_sync_acct(conf->disks[i].rdev, STRIPE_SECTORS); md_sync_acct(conf->disks[i].rdev, STRIPE_SECTORS);
} }
...@@ -1055,7 +1074,8 @@ static void handle_stripe(struct stripe_head *sh) ...@@ -1055,7 +1074,8 @@ static void handle_stripe(struct stripe_head *sh)
else rcw += 2*disks; else rcw += 2*disks;
} }
} }
PRINTK("for sector %llu, rmw=%d rcw=%d\n", (unsigned long long)sh->sector, rmw, rcw); PRINTK("for sector %llu, rmw=%d rcw=%d\n",
(unsigned long long)sh->sector, rmw, rcw);
set_bit(STRIPE_HANDLE, &sh->state); set_bit(STRIPE_HANDLE, &sh->state);
if (rmw < rcw && rmw > 0) if (rmw < rcw && rmw > 0)
/* prefer read-modify-write, but need to get some data */ /* prefer read-modify-write, but need to get some data */
...@@ -1204,7 +1224,8 @@ static void handle_stripe(struct stripe_head *sh) ...@@ -1204,7 +1224,8 @@ static void handle_stripe(struct stripe_head *sh)
md_sync_acct(rdev, STRIPE_SECTORS); md_sync_acct(rdev, STRIPE_SECTORS);
bi->bi_bdev = rdev->bdev; bi->bi_bdev = rdev->bdev;
PRINTK("for %llu schedule op %ld on disc %d\n", (unsigned long long)sh->sector, bi->bi_rw, i); PRINTK("for %llu schedule op %ld on disc %d\n",
(unsigned long long)sh->sector, bi->bi_rw, i);
atomic_inc(&sh->count); atomic_inc(&sh->count);
bi->bi_sector = sh->sector + rdev->data_offset; bi->bi_sector = sh->sector + rdev->data_offset;
bi->bi_flags = 1 << BIO_UPTODATE; bi->bi_flags = 1 << BIO_UPTODATE;
...@@ -1217,7 +1238,8 @@ static void handle_stripe(struct stripe_head *sh) ...@@ -1217,7 +1238,8 @@ static void handle_stripe(struct stripe_head *sh)
bi->bi_next = NULL; bi->bi_next = NULL;
generic_make_request(bi); generic_make_request(bi);
} else { } else {
PRINTK("skip op %ld on disc %d for sector %llu\n", bi->bi_rw, i, (unsigned long long)sh->sector); PRINTK("skip op %ld on disc %d for sector %llu\n",
bi->bi_rw, i, (unsigned long long)sh->sector);
clear_bit(R5_LOCKED, &sh->dev[i].flags); clear_bit(R5_LOCKED, &sh->dev[i].flags);
set_bit(STRIPE_HANDLE, &sh->state); set_bit(STRIPE_HANDLE, &sh->state);
} }
...@@ -1285,8 +1307,9 @@ static int make_request (request_queue_t *q, struct bio * bi) ...@@ -1285,8 +1307,9 @@ static int make_request (request_queue_t *q, struct bio * bi)
new_sector = raid5_compute_sector(logical_sector, new_sector = raid5_compute_sector(logical_sector,
raid_disks, data_disks, &dd_idx, &pd_idx, conf); raid_disks, data_disks, &dd_idx, &pd_idx, conf);
PRINTK("raid5: make_request, sector %Lu logical %Lu\n", PRINTK("raid5: make_request, sector %Lu logical %Lu\n",
(unsigned long long)new_sector, (unsigned long long)logical_sector); (unsigned long long)new_sector,
(unsigned long long)logical_sector);
sh = get_active_stripe(conf, new_sector, pd_idx, (bi->bi_rw&RWA_MASK)); sh = get_active_stripe(conf, new_sector, pd_idx, (bi->bi_rw&RWA_MASK));
if (sh) { if (sh) {
...@@ -1410,11 +1433,8 @@ static int run (mddev_t *mddev) ...@@ -1410,11 +1433,8 @@ static int run (mddev_t *mddev)
struct disk_info *disk; struct disk_info *disk;
struct list_head *tmp; struct list_head *tmp;
MOD_INC_USE_COUNT;
if (mddev->level != 5 && mddev->level != 4) { if (mddev->level != 5 && mddev->level != 4) {
printk("raid5: md%d: raid level not set to 4/5 (%d)\n", mdidx(mddev), mddev->level); printk("raid5: md%d: raid level not set to 4/5 (%d)\n", mdidx(mddev), mddev->level);
MOD_DEC_USE_COUNT;
return -EIO; return -EIO;
} }
...@@ -1450,7 +1470,9 @@ static int run (mddev_t *mddev) ...@@ -1450,7 +1470,9 @@ static int run (mddev_t *mddev)
disk->rdev = rdev; disk->rdev = rdev;
if (rdev->in_sync) { if (rdev->in_sync) {
printk(KERN_INFO "raid5: device %s operational as raid disk %d\n", bdev_partition_name(rdev->bdev), raid_disk); printk(KERN_INFO "raid5: device %s operational as raid"
" disk %d\n", bdev_partition_name(rdev->bdev),
raid_disk);
conf->working_disks++; conf->working_disks++;
} }
} }
...@@ -1467,48 +1489,62 @@ static int run (mddev_t *mddev) ...@@ -1467,48 +1489,62 @@ static int run (mddev_t *mddev)
conf->max_nr_stripes = NR_STRIPES; conf->max_nr_stripes = NR_STRIPES;
if (!conf->chunk_size || conf->chunk_size % 4) { if (!conf->chunk_size || conf->chunk_size % 4) {
printk(KERN_ERR "raid5: invalid chunk size %d for md%d\n", conf->chunk_size, mdidx(mddev)); printk(KERN_ERR "raid5: invalid chunk size %d for md%d\n",
conf->chunk_size, mdidx(mddev));
goto abort; goto abort;
} }
if (conf->algorithm > ALGORITHM_RIGHT_SYMMETRIC) { if (conf->algorithm > ALGORITHM_RIGHT_SYMMETRIC) {
printk(KERN_ERR "raid5: unsupported parity algorithm %d for md%d\n", conf->algorithm, mdidx(mddev)); printk(KERN_ERR
"raid5: unsupported parity algorithm %d for md%d\n",
conf->algorithm, mdidx(mddev));
goto abort; goto abort;
} }
if (mddev->degraded > 1) { if (mddev->degraded > 1) {
printk(KERN_ERR "raid5: not enough operational devices for md%d (%d/%d failed)\n", mdidx(mddev), conf->failed_disks, conf->raid_disks); printk(KERN_ERR "raid5: not enough operational devices for md%d"
" (%d/%d failed)\n",
mdidx(mddev), conf->failed_disks, conf->raid_disks);
goto abort; goto abort;
} }
if (mddev->degraded == 1 && if (mddev->degraded == 1 &&
mddev->recovery_cp != MaxSector) { mddev->recovery_cp != MaxSector) {
printk(KERN_ERR "raid5: cannot start dirty degraded array for md%d\n", mdidx(mddev)); printk(KERN_ERR
"raid5: cannot start dirty degraded array for md%d\n",
mdidx(mddev));
goto abort; goto abort;
} }
{ {
mddev->thread = md_register_thread(raid5d, mddev, "md%d_raid5"); mddev->thread = md_register_thread(raid5d, mddev, "md%d_raid5");
if (!mddev->thread) { if (!mddev->thread) {
printk(KERN_ERR "raid5: couldn't allocate thread for md%d\n", mdidx(mddev)); printk(KERN_ERR
"raid5: couldn't allocate thread for md%d\n",
mdidx(mddev));
goto abort; goto abort;
} }
} }
memory = conf->max_nr_stripes * (sizeof(struct stripe_head) +
memory = conf->max_nr_stripes * (sizeof(struct stripe_head) +
conf->raid_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024; conf->raid_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024;
if (grow_stripes(conf, conf->max_nr_stripes)) { if (grow_stripes(conf, conf->max_nr_stripes)) {
printk(KERN_ERR "raid5: couldn't allocate %dkB for buffers\n", memory); printk(KERN_ERR
"raid5: couldn't allocate %dkB for buffers\n", memory);
shrink_stripes(conf); shrink_stripes(conf);
md_unregister_thread(mddev->thread); md_unregister_thread(mddev->thread);
goto abort; goto abort;
} else } else
printk(KERN_INFO "raid5: allocated %dkB for md%d\n", memory, mdidx(mddev)); printk(KERN_INFO "raid5: allocated %dkB for md%d\n",
memory, mdidx(mddev));
if (mddev->degraded == 0) if (mddev->degraded == 0)
printk("raid5: raid level %d set md%d active with %d out of %d devices, algorithm %d\n", conf->level, mdidx(mddev), printk("raid5: raid level %d set md%d active with %d out of %d"
mddev->raid_disks-mddev->degraded, mddev->raid_disks, conf->algorithm); " devices, algorithm %d\n", conf->level, mdidx(mddev),
mddev->raid_disks-mddev->degraded, mddev->raid_disks,
conf->algorithm);
else else
printk(KERN_ALERT "raid5: raid level %d set md%d active with %d out of %d devices, algorithm %d\n", conf->level, mdidx(mddev), printk(KERN_ALERT "raid5: raid level %d set md%d active with %d"
mddev->raid_disks - mddev->degraded, mddev->raid_disks, conf->algorithm); " out of %d devices, algorithm %d\n", conf->level,
mdidx(mddev), mddev->raid_disks - mddev->degraded,
mddev->raid_disks, conf->algorithm);
print_raid5_conf(conf); print_raid5_conf(conf);
...@@ -1524,7 +1560,6 @@ static int run (mddev_t *mddev) ...@@ -1524,7 +1560,6 @@ static int run (mddev_t *mddev)
} }
mddev->private = NULL; mddev->private = NULL;
printk(KERN_ALERT "raid5: failed to run raid set md%d\n", mdidx(mddev)); printk(KERN_ALERT "raid5: failed to run raid set md%d\n", mdidx(mddev));
MOD_DEC_USE_COUNT;
return -EIO; return -EIO;
} }
...@@ -1540,7 +1575,6 @@ static int stop (mddev_t *mddev) ...@@ -1540,7 +1575,6 @@ static int stop (mddev_t *mddev)
free_pages((unsigned long) conf->stripe_hashtbl, HASH_PAGES_ORDER); free_pages((unsigned long) conf->stripe_hashtbl, HASH_PAGES_ORDER);
kfree(conf); kfree(conf);
mddev->private = NULL; mddev->private = NULL;
MOD_DEC_USE_COUNT;
return 0; return 0;
} }
...@@ -1549,11 +1583,14 @@ static void print_sh (struct stripe_head *sh) ...@@ -1549,11 +1583,14 @@ static void print_sh (struct stripe_head *sh)
{ {
int i; int i;
printk("sh %llu, pd_idx %d, state %ld.\n", (unsigned long long)sh->sector, sh->pd_idx, sh->state); printk("sh %llu, pd_idx %d, state %ld.\n",
printk("sh %llu, count %d.\n", (unsigned long long)sh->sector, atomic_read(&sh->count)); (unsigned long long)sh->sector, sh->pd_idx, sh->state);
printk("sh %llu, count %d.\n",
(unsigned long long)sh->sector, atomic_read(&sh->count));
printk("sh %llu, ", (unsigned long long)sh->sector); printk("sh %llu, ", (unsigned long long)sh->sector);
for (i = 0; i < sh->raid_conf->raid_disks; i++) { for (i = 0; i < sh->raid_conf->raid_disks; i++) {
printk("(cache%d: %p %ld) ", i, sh->dev[i].page, sh->dev[i].flags); printk("(cache%d: %p %ld) ",
i, sh->dev[i].page, sh->dev[i].flags);
} }
printk("\n"); printk("\n");
} }
...@@ -1693,6 +1730,7 @@ static int raid5_add_disk(mddev_t *mddev, mdk_rdev_t *rdev) ...@@ -1693,6 +1730,7 @@ static int raid5_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
static mdk_personality_t raid5_personality= static mdk_personality_t raid5_personality=
{ {
.name = "raid5", .name = "raid5",
.owner = THIS_MODULE,
.make_request = make_request, .make_request = make_request,
.run = run, .run = run,
.stop = stop, .stop = stop,
......
...@@ -23,7 +23,9 @@ ...@@ -23,7 +23,9 @@
* 2002-01-17 Adam Belay <ambx1@neo.rr.com> * 2002-01-17 Adam Belay <ambx1@neo.rr.com>
* Updated to latest pnp code * Updated to latest pnp code
* *
*/ * 2003-01-31 Alan Cox <alan@redhat.com>
* Cleaned up locking, delay code, general odds and ends
*/
#include <linux/module.h> /* Modules */ #include <linux/module.h> /* Modules */
#include <linux/init.h> /* Initdata */ #include <linux/init.h> /* Initdata */
...@@ -43,11 +45,11 @@ static int users=0; ...@@ -43,11 +45,11 @@ static int users=0;
static int curtuner=0; static int curtuner=0;
static int tunestat=0; static int tunestat=0;
static int sigstrength=0; static int sigstrength=0;
static wait_queue_head_t tunerq,rdsq,readq; static wait_queue_head_t readq;
struct timer_list tunertimer,rdstimer,readtimer; struct timer_list tunertimer,rdstimer,readtimer;
static __u8 rdsin=0,rdsout=0,rdsstat=0; static __u8 rdsin=0,rdsout=0,rdsstat=0;
static unsigned char rdsbuf[RDS_BUFFER]; static unsigned char rdsbuf[RDS_BUFFER];
static int cadet_lock=0; static spinlock_t cadet_io_lock;
static int cadet_probe(void); static int cadet_probe(void);
...@@ -58,37 +60,19 @@ static int cadet_probe(void); ...@@ -58,37 +60,19 @@ static int cadet_probe(void);
*/ */
static __u16 sigtable[2][4]={{5,10,30,150},{28,40,63,1000}}; static __u16 sigtable[2][4]={{5,10,30,150},{28,40,63,1000}};
static void cadet_wake(unsigned long qnum)
{
switch(qnum) {
case 0: /* cadet_setfreq */
wake_up(&tunerq);
break;
case 1: /* cadet_getrds */
wake_up(&rdsq);
break;
}
}
static int cadet_getrds(void) static int cadet_getrds(void)
{ {
int rdsstat=0; int rdsstat=0;
cadet_lock++; spin_lock(&cadet_io_lock);
outb(3,io); /* Select Decoder Control/Status */ outb(3,io); /* Select Decoder Control/Status */
outb(inb(io+1)&0x7f,io+1); /* Reset RDS detection */ outb(inb(io+1)&0x7f,io+1); /* Reset RDS detection */
cadet_lock--; spin_unlock(&cadet_io_lock);
init_timer(&rdstimer);
rdstimer.function=cadet_wake;
rdstimer.data=(unsigned long)1;
rdstimer.expires=jiffies+(HZ/10);
init_waitqueue_head(&rdsq);
add_timer(&rdstimer);
sleep_on(&rdsq);
cadet_lock++; set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(HZ/10);
spin_lock(&cadet_io_lock);
outb(3,io); /* Select Decoder Control/Status */ outb(3,io); /* Select Decoder Control/Status */
if((inb(io+1)&0x80)!=0) { if((inb(io+1)&0x80)!=0) {
rdsstat|=VIDEO_TUNER_RDS_ON; rdsstat|=VIDEO_TUNER_RDS_ON;
...@@ -96,32 +80,24 @@ static int cadet_getrds(void) ...@@ -96,32 +80,24 @@ static int cadet_getrds(void)
if((inb(io+1)&0x10)!=0) { if((inb(io+1)&0x10)!=0) {
rdsstat|=VIDEO_TUNER_MBS_ON; rdsstat|=VIDEO_TUNER_MBS_ON;
} }
cadet_lock--; spin_unlock(&cadet_io_lock);
return rdsstat; return rdsstat;
} }
static int cadet_getstereo(void) static int cadet_getstereo(void)
{ {
if(curtuner!=0) { /* Only FM has stereo capability! */ int ret = 0;
if(curtuner != 0) /* Only FM has stereo capability! */
return 0; return 0;
}
cadet_lock++; spin_lock(&cadet_io_lock);
outb(7,io); /* Select tuner control */ outb(7,io); /* Select tuner control */
if((inb(io+1)&0x40)==0) { if( (inb(io+1) & 0x40) == 0)
cadet_lock--; ret = 1;
return 1; /* Stereo pilot detected */ spin_unlock(&cadet_io_lock);
} return ret;
else {
cadet_lock--;
return 0; /* Mono */
}
} }
static unsigned cadet_gettune(void) static unsigned cadet_gettune(void)
{ {
int curvol,i; int curvol,i;
...@@ -130,7 +106,9 @@ static unsigned cadet_gettune(void) ...@@ -130,7 +106,9 @@ static unsigned cadet_gettune(void)
/* /*
* Prepare for read * Prepare for read
*/ */
cadet_lock++;
spin_lock(&cadet_io_lock);
outb(7,io); /* Select tuner control */ outb(7,io); /* Select tuner control */
curvol=inb(io+1); /* Save current volume/mute setting */ curvol=inb(io+1); /* Save current volume/mute setting */
outb(0x00,io+1); /* Ensure WRITE-ENABLE is LOW */ outb(0x00,io+1); /* Ensure WRITE-ENABLE is LOW */
...@@ -152,13 +130,11 @@ static unsigned cadet_gettune(void) ...@@ -152,13 +130,11 @@ static unsigned cadet_gettune(void)
* Restore volume/mute setting * Restore volume/mute setting
*/ */
outb(curvol,io+1); outb(curvol,io+1);
cadet_lock--; spin_unlock(&cadet_io_lock);
return fifo; return fifo;
} }
static unsigned cadet_getfreq(void) static unsigned cadet_getfreq(void)
{ {
int i; int i;
...@@ -191,14 +167,13 @@ static unsigned cadet_getfreq(void) ...@@ -191,14 +167,13 @@ static unsigned cadet_getfreq(void)
return freq; return freq;
} }
static void cadet_settune(unsigned fifo) static void cadet_settune(unsigned fifo)
{ {
int i; int i;
unsigned test; unsigned test;
cadet_lock++; spin_lock(&cadet_io_lock);
outb(7,io); /* Select tuner control */ outb(7,io); /* Select tuner control */
/* /*
* Write the shift register * Write the shift register
...@@ -217,11 +192,9 @@ static void cadet_settune(unsigned fifo) ...@@ -217,11 +192,9 @@ static void cadet_settune(unsigned fifo)
test=0x1c|((fifo>>23)&0x02); test=0x1c|((fifo>>23)&0x02);
outb(test,io+1); outb(test,io+1);
} }
cadet_lock--; spin_unlock(&cadet_io_lock);
} }
static void cadet_setfreq(unsigned freq) static void cadet_setfreq(unsigned freq)
{ {
unsigned fifo; unsigned fifo;
...@@ -253,92 +226,90 @@ static void cadet_setfreq(unsigned freq) ...@@ -253,92 +226,90 @@ static void cadet_setfreq(unsigned freq)
/* /*
* Save current volume/mute setting * Save current volume/mute setting
*/ */
cadet_lock++;
spin_lock(&cadet_io_lock);
outb(7,io); /* Select tuner control */ outb(7,io); /* Select tuner control */
curvol=inb(io+1); curvol=inb(io+1);
spin_unlock(&cadet_io_lock);
/* /*
* Tune the card * Tune the card
*/ */
for(j=3;j>-1;j--) { for(j=3;j>-1;j--) {
cadet_settune(fifo|(j<<16)); cadet_settune(fifo|(j<<16));
spin_lock(&cadet_io_lock);
outb(7,io); /* Select tuner control */ outb(7,io); /* Select tuner control */
outb(curvol,io+1); outb(curvol,io+1);
cadet_lock--; spin_unlock(&cadet_io_lock);
init_timer(&tunertimer);
tunertimer.function=cadet_wake; set_current_state(TASK_UNINTERRUPTIBLE);
tunertimer.data=(unsigned long)0; schedule_timeout(HZ/10);
tunertimer.expires=jiffies+(HZ/10);
init_waitqueue_head(&tunerq);
add_timer(&tunertimer);
sleep_on(&tunerq);
cadet_gettune(); cadet_gettune();
if((tunestat&0x40)==0) { /* Tuned */ if((tunestat & 0x40) == 0) { /* Tuned */
sigstrength=sigtable[curtuner][j]; sigstrength=sigtable[curtuner][j];
return; return;
} }
cadet_lock++;
} }
cadet_lock--;
sigstrength=0; sigstrength=0;
} }
static int cadet_getvol(void) static int cadet_getvol(void)
{ {
cadet_lock++; int ret = 0;
spin_lock(&cadet_io_lock);
outb(7,io); /* Select tuner control */ outb(7,io); /* Select tuner control */
if((inb(io+1)&0x20)!=0) { if((inb(io + 1) & 0x20) != 0)
cadet_lock--; ret = 0xffff;
return 0xffff;
} spin_unlock(&cadet_io_lock);
else { return ret;
cadet_lock--;
return 0;
}
} }
static void cadet_setvol(int vol) static void cadet_setvol(int vol)
{ {
cadet_lock++; spin_lock(&cadet_io_lock);
outb(7,io); /* Select tuner control */ outb(7,io); /* Select tuner control */
if(vol>0) { if(vol>0)
outb(0x20,io+1); outb(0x20,io+1);
} else
else {
outb(0x00,io+1); outb(0x00,io+1);
} spin_unlock(&cadet_io_lock);
cadet_lock--;
} }
void cadet_handler(unsigned long data) void cadet_handler(unsigned long data)
{ {
/* /*
* Service the RDS fifo * Service the RDS fifo
*/ */
if(cadet_lock==0) {
if(spin_trylock(&cadet_io_lock))
{
outb(0x3,io); /* Select RDS Decoder Control */ outb(0x3,io); /* Select RDS Decoder Control */
if((inb(io+1)&0x20)!=0) { if((inb(io+1)&0x20)!=0) {
printk(KERN_CRIT "cadet: RDS fifo overflow\n"); printk(KERN_CRIT "cadet: RDS fifo overflow\n");
} }
outb(0x80,io); /* Select RDS fifo */ outb(0x80,io); /* Select RDS fifo */
while((inb(io)&0x80)!=0) { while((inb(io)&0x80)!=0) {
rdsbuf[rdsin++]=inb(io+1); rdsbuf[rdsin]=inb(io+1);
if(rdsin==rdsout) { if(rdsin==rdsout)
printk(KERN_CRIT "cadet: RDS buffer overflow\n"); printk(KERN_WARNING "cadet: RDS buffer overflow\n");
} else
rdsin++;
} }
spin_unlock(&cadet_io_lock);
} }
/* /*
* Service pending read * Service pending read
*/ */
if( rdsin!=rdsout) { if( rdsin!=rdsout)
wake_up_interruptible(&readq); wake_up_interruptible(&readq);
}
/* /*
* Clean up and exit * Clean up and exit
...@@ -359,10 +330,10 @@ static ssize_t cadet_read(struct file *file, char *data, ...@@ -359,10 +330,10 @@ static ssize_t cadet_read(struct file *file, char *data,
unsigned char readbuf[RDS_BUFFER]; unsigned char readbuf[RDS_BUFFER];
if(rdsstat==0) { if(rdsstat==0) {
cadet_lock++; spin_lock(&cadet_io_lock);
rdsstat=1; rdsstat=1;
outb(0x80,io); /* Select RDS fifo */ outb(0x80,io); /* Select RDS fifo */
cadet_lock--; spin_unlock(&cadet_io_lock);
init_timer(&readtimer); init_timer(&readtimer);
readtimer.function=cadet_handler; readtimer.function=cadet_handler;
readtimer.data=(unsigned long)0; readtimer.data=(unsigned long)0;
...@@ -370,14 +341,13 @@ static ssize_t cadet_read(struct file *file, char *data, ...@@ -370,14 +341,13 @@ static ssize_t cadet_read(struct file *file, char *data,
add_timer(&readtimer); add_timer(&readtimer);
} }
if(rdsin==rdsout) { if(rdsin==rdsout) {
if (file->f_flags & O_NONBLOCK) { if (file->f_flags & O_NONBLOCK)
return -EWOULDBLOCK; return -EWOULDBLOCK;
}
interruptible_sleep_on(&readq); interruptible_sleep_on(&readq);
} }
while((i<count)&&(rdsin!=rdsout)) { while( i<count && rdsin!=rdsout)
readbuf[i++]=rdsbuf[rdsout++]; readbuf[i++]=rdsbuf[rdsout++];
}
if (copy_to_user(data,readbuf,i)) if (copy_to_user(data,readbuf,i))
return -EFAULT; return -EFAULT;
return i; return i;
...@@ -515,10 +485,8 @@ static int cadet_open(struct inode *inode, struct file *file) ...@@ -515,10 +485,8 @@ static int cadet_open(struct inode *inode, struct file *file)
static int cadet_release(struct inode *inode, struct file *file) static int cadet_release(struct inode *inode, struct file *file)
{ {
if(rdsstat==1) { del_timer_sync(&readtimer);
del_timer(&readtimer); rdsstat=0;
rdsstat=0;
}
users--; users--;
return 0; return 0;
} }
...@@ -595,13 +563,15 @@ static int cadet_probe(void) ...@@ -595,13 +563,15 @@ static int cadet_probe(void)
return -1; return -1;
} }
/* /*
* io should only be set if the user has used something like * io should only be set if the user has used something like
* isapnp (the userspace program) to initialize this card for us * isapnp (the userspace program) to initialize this card for us
*/ */
static int __init cadet_init(void) static int __init cadet_init(void)
{ {
spin_lock_init(&cadet_io_lock);
/* /*
* If a probe was requested then probe ISAPnP first (safest) * If a probe was requested then probe ISAPnP first (safest)
*/ */
......
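The radio-cadet rework replaces the cadet_lock counter (which only advised the timer handler not to touch the hardware) with a real spinlock, cadet_io_lock, around every block of inb()/outb() accesses, and replaces the private tunerq/rdsq wait queues plus one-shot timers with a plain schedule_timeout() for the roughly 100 ms settling delays; cadet_wake() disappears entirely and cadet_release() now uses del_timer_sync(). A rough sketch of the new delay sequence, kernel context assumed:

	spin_lock(&cadet_io_lock);
	outb(7, io);			/* select tuner control */
	outb(curvol, io + 1);		/* restore volume/mute  */
	spin_unlock(&cadet_io_lock);

	/* Sleep roughly 1/10 s without a dedicated wait queue or timer. */
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule_timeout(HZ / 10);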
...@@ -677,7 +677,7 @@ static int tw_chrdev_ioctl(struct inode *inode, struct file *file, unsigned int ...@@ -677,7 +677,7 @@ static int tw_chrdev_ioctl(struct inode *inode, struct file *file, unsigned int
dprintk(KERN_WARNING "3w-xxxx: tw_chrdev_ioctl(): caught TW_AEN_LISTEN.\n"); dprintk(KERN_WARNING "3w-xxxx: tw_chrdev_ioctl(): caught TW_AEN_LISTEN.\n");
memset(tw_ioctl->data_buffer, 0, tw_ioctl->data_buffer_length); memset(tw_ioctl->data_buffer, 0, tw_ioctl->data_buffer_length);
spin_lock_irqsave(&tw_dev->host->host_lock, flags); spin_lock_irqsave(tw_dev->host->host_lock, flags);
if (tw_dev->aen_head == tw_dev->aen_tail) { if (tw_dev->aen_head == tw_dev->aen_tail) {
tw_aen_code = TW_AEN_QUEUE_EMPTY; tw_aen_code = TW_AEN_QUEUE_EMPTY;
} else { } else {
...@@ -688,7 +688,7 @@ static int tw_chrdev_ioctl(struct inode *inode, struct file *file, unsigned int ...@@ -688,7 +688,7 @@ static int tw_chrdev_ioctl(struct inode *inode, struct file *file, unsigned int
tw_dev->aen_head = tw_dev->aen_head + 1; tw_dev->aen_head = tw_dev->aen_head + 1;
} }
} }
spin_unlock_irqrestore(&tw_dev->tw_lock, flags); spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
memcpy(tw_ioctl->data_buffer, &tw_aen_code, sizeof(tw_aen_code)); memcpy(tw_ioctl->data_buffer, &tw_aen_code, sizeof(tw_aen_code));
break; break;
case TW_CMD_PACKET_WITH_DATA: case TW_CMD_PACKET_WITH_DATA:
......
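The 3w-xxxx hunk corrects a lock pairing bug in the TW_AEN_LISTEN path: the old code took the host lock but released tw_dev->tw_lock, and it also took the address of host_lock even though it is used as a pointer here. The fixed code locks and unlocks the same lock through the pointer. Before and after, condensed with the body elided:

	/* before: mismatched pair */
	spin_lock_irqsave(&tw_dev->host->host_lock, flags);
	/* ... drain one AEN entry ... */
	spin_unlock_irqrestore(&tw_dev->tw_lock, flags);

	/* after: same lock on both sides, passed as a pointer */
	spin_lock_irqsave(tw_dev->host->host_lock, flags);
	/* ... drain one AEN entry ... */
	spin_unlock_irqrestore(tw_dev->host->host_lock, flags);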
...@@ -372,14 +372,25 @@ config V850E_NB85E_UART_CONSOLE ...@@ -372,14 +372,25 @@ config V850E_NB85E_UART_CONSOLE
bool "Use NEC V850E on-chip UART for console" bool "Use NEC V850E on-chip UART for console"
depends on V850E_NB85E_UART depends on V850E_NB85E_UART
config SERIAL98
tristate "PC-9800 8251-based primary serial port support"
depends on X86_PC9800
help
If you want to use standard primary serial ports on PC-9800,
say Y. Otherwise, say N.
config SERIAL98_CONSOLE
bool "Support for console on PC-9800 standard serial port"
depends on SERIAL98=y
config SERIAL_CORE config SERIAL_CORE
tristate tristate
default m if SERIAL_AMBA!=y && SERIAL_CLPS711X!=y && SERIAL_21285!=y && !SERIAL_SA1100 && !SERIAL_ANAKIN && !SERIAL_UART00 && SERIAL_8250!=y && SERIAL_MUX!=y && !SERIAL_ROCKETPORT && !SERIAL_SUNCORE && !V850E_NB85E_UART && (SERIAL_AMBA=m || SERIAL_CLPS711X=m || SERIAL_21285=m || SERIAL_8250=m || SERIAL_MUX=m) default m if SERIAL_AMBA!=y && SERIAL_CLPS711X!=y && SERIAL_21285!=y && !SERIAL_SA1100 && !SERIAL_ANAKIN && !SERIAL_UART00 && SERIAL_8250!=y && SERIAL_MUX!=y && !SERIAL_ROCKETPORT && !SERIAL_SUNCORE && !V850E_NB85E_UART && (SERIAL_AMBA=m || SERIAL_CLPS711X=m || SERIAL_21285=m || SERIAL_8250=m || SERIAL_MUX=m || SERIAL98=m)
default y if SERIAL_AMBA=y || SERIAL_CLPS711X=y || SERIAL_21285=y || SERIAL_SA1100 || SERIAL_ANAKIN || SERIAL_UART00 || SERIAL_8250=y || SERIAL_MUX=y || SERIAL_ROCKETPORT || SERIAL_SUNCORE || V850E_NB85E_UART default y if SERIAL_AMBA=y || SERIAL_CLPS711X=y || SERIAL_21285=y || SERIAL_SA1100 || SERIAL_ANAKIN || SERIAL_UART00 || SERIAL_8250=y || SERIAL_MUX=y || SERIAL_ROCKETPORT || SERIAL_SUNCORE || V850E_NB85E_UART || SERIAL98=y
config SERIAL_CORE_CONSOLE config SERIAL_CORE_CONSOLE
bool bool
depends on SERIAL_AMBA_CONSOLE || SERIAL_CLPS711X_CONSOLE || SERIAL_21285_CONSOLE || SERIAL_SA1100_CONSOLE || SERIAL_ANAKIN_CONSOLE || SERIAL_UART00_CONSOLE || SERIAL_8250_CONSOLE || SERIAL_MUX_CONSOLE || SERIAL_SUNCORE || V850E_NB85E_UART_CONSOLE depends on SERIAL_AMBA_CONSOLE || SERIAL_CLPS711X_CONSOLE || SERIAL_21285_CONSOLE || SERIAL_SA1100_CONSOLE || SERIAL_ANAKIN_CONSOLE || SERIAL_UART00_CONSOLE || SERIAL_8250_CONSOLE || SERIAL_MUX_CONSOLE || SERIAL_SUNCORE || V850E_NB85E_UART_CONSOLE || SERIAL98_CONSOLE
default y default y
config SERIAL_68328 config SERIAL_68328
......
...@@ -27,3 +27,4 @@ obj-$(CONFIG_SERIAL_68328) += 68328serial.o ...@@ -27,3 +27,4 @@ obj-$(CONFIG_SERIAL_68328) += 68328serial.o
obj-$(CONFIG_SERIAL_68360) += 68360serial.o obj-$(CONFIG_SERIAL_68360) += 68360serial.o
obj-$(CONFIG_SERIAL_COLDFIRE) += mcfserial.o obj-$(CONFIG_SERIAL_COLDFIRE) += mcfserial.o
obj-$(CONFIG_V850E_NB85E_UART) += nb85e_uart.o obj-$(CONFIG_V850E_NB85E_UART) += nb85e_uart.o
obj-$(CONFIG_SERIAL98) += serial98.o
...@@ -43,6 +43,7 @@ static DECLARE_MUTEX(read_mutex); ...@@ -43,6 +43,7 @@ static DECLARE_MUTEX(read_mutex);
static struct inode *get_cramfs_inode(struct super_block *sb, struct cramfs_inode * cramfs_inode) static struct inode *get_cramfs_inode(struct super_block *sb, struct cramfs_inode * cramfs_inode)
{ {
struct inode * inode = new_inode(sb); struct inode * inode = new_inode(sb);
static struct timespec zerotime = { 0, 0 };
if (inode) { if (inode) {
inode->i_mode = cramfs_inode->mode; inode->i_mode = cramfs_inode->mode;
...@@ -51,7 +52,8 @@ static struct inode *get_cramfs_inode(struct super_block *sb, struct cramfs_inod ...@@ -51,7 +52,8 @@ static struct inode *get_cramfs_inode(struct super_block *sb, struct cramfs_inod
inode->i_blocks = (cramfs_inode->size - 1) / 512 + 1; inode->i_blocks = (cramfs_inode->size - 1) / 512 + 1;
inode->i_blksize = PAGE_CACHE_SIZE; inode->i_blksize = PAGE_CACHE_SIZE;
inode->i_gid = cramfs_inode->gid; inode->i_gid = cramfs_inode->gid;
inode->i_mtime = inode->i_atime = inode->i_ctime = 0; /* Struct copy intentional */
inode->i_mtime = inode->i_atime = inode->i_ctime = zerotime;
inode->i_ino = CRAMINO(cramfs_inode); inode->i_ino = CRAMINO(cramfs_inode);
/* inode->i_nlink is left 1 - arguably wrong for directories, /* inode->i_nlink is left 1 - arguably wrong for directories,
but it's the best we can do without reading the directory but it's the best we can do without reading the directory
......
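The cramfs hunk above replaces the old integer assignment with a copy of a static zerotime, since the inode timestamps are now struct timespec values and can no longer be set with a plain 0. A minimal userspace sketch of why the struct copy works (struct ts and the variable names are invented stand-ins, not the kernel types):

    #include <stdio.h>

    /* stand-in for the kernel's struct timespec */
    struct ts { long tv_sec; long tv_nsec; };

    int main(void)
    {
        static const struct ts zerotime = { 0, 0 };
        struct ts mt = { 123, 456 }, at = { 7, 8 }, ct = { 9, 10 };

        /* chained struct assignment copies both members of zerotime */
        mt = at = ct = zerotime;

        printf("%ld.%09ld %ld.%09ld %ld.%09ld\n",
               mt.tv_sec, mt.tv_nsec,
               at.tv_sec, at.tv_nsec,
               ct.tv_sec, ct.tv_nsec);
        return 0;
    }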
...@@ -38,7 +38,6 @@ ...@@ -38,7 +38,6 @@
#include <linux/binfmts.h> #include <linux/binfmts.h>
#include <linux/swap.h> #include <linux/swap.h>
#include <linux/utsname.h> #include <linux/utsname.h>
#define __NO_VERSION__
#include <linux/module.h> #include <linux/module.h>
#include <linux/namei.h> #include <linux/namei.h>
#include <linux/proc_fs.h> #include <linux/proc_fs.h>
......
...@@ -496,13 +496,19 @@ exp_parent(svc_client *clp, struct vfsmount *mnt, struct dentry *dentry, ...@@ -496,13 +496,19 @@ exp_parent(svc_client *clp, struct vfsmount *mnt, struct dentry *dentry,
{ {
svc_export *exp; svc_export *exp;
read_lock(&dparent_lock); dget(dentry);
exp = exp_get_by_name(clp, mnt, dentry, reqp); exp = exp_get_by_name(clp, mnt, dentry, reqp);
while (exp == NULL && dentry != dentry->d_parent) { while (exp == NULL && dentry != dentry->d_parent) {
dentry = dentry->d_parent; struct dentry *parent;
read_lock(&dparent_lock);
parent = dget(dentry->d_parent);
dput(dentry);
dentry = parent;
read_unlock(&dparent_lock);
exp = exp_get_by_name(clp, mnt, dentry, reqp); exp = exp_get_by_name(clp, mnt, dentry, reqp);
} }
read_unlock(&dparent_lock); dput(dentry);
return exp; return exp;
} }
......
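The exp_parent change walks up the dentry chain while always holding a reference on the dentry it is currently standing on: take a reference on the parent under dparent_lock before dropping the reference on the child. A toy userspace sketch of that take-before-release ordering (node, node_get and node_put are invented stand-ins for dentry, dget and dput; the lock is omitted):

    #include <stdio.h>

    /* toy refcounted tree nodes, purely illustrative */
    struct node { struct node *parent; int refcount; const char *name; };

    static struct node *node_get(struct node *n) { n->refcount++; return n; }
    static void node_put(struct node *n) { n->refcount--; }

    int main(void)
    {
        struct node root = { &root, 1, "/" };   /* root is its own parent */
        struct node mid  = { &root, 1, "mid" };
        struct node leaf = { &mid,  1, "leaf" };

        struct node *cur = node_get(&leaf);     /* pin the starting point */
        while (cur != cur->parent) {
            /* grab the parent *before* letting go of the child, so the
             * chain we are standing on cannot go away under us */
            struct node *parent = node_get(cur->parent);
            node_put(cur);
            cur = parent;
            printf("now at %s\n", cur->name);
        }
        node_put(cur);
        return 0;
    }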
...@@ -231,6 +231,7 @@ xdr_error: \ ...@@ -231,6 +231,7 @@ xdr_error: \
p += XDR_QUADLEN(nbytes); \ p += XDR_QUADLEN(nbytes); \
} while (0) } while (0)
/* READ_BUF, read_buf(): nbytes must be <= PAGE_SIZE */
#define READ_BUF(nbytes) do { \ #define READ_BUF(nbytes) do { \
if (nbytes <= (u32)((char *)argp->end - (char *)argp->p)) { \ if (nbytes <= (u32)((char *)argp->end - (char *)argp->p)) { \
p = argp->p; \ p = argp->p; \
...@@ -244,15 +245,15 @@ xdr_error: \ ...@@ -244,15 +245,15 @@ xdr_error: \
u32 *read_buf(struct nfsd4_compoundargs *argp, int nbytes) u32 *read_buf(struct nfsd4_compoundargs *argp, int nbytes)
{ {
/* We want more bytes than seem to be available. /* We want more bytes than seem to be available.
* Maybe we need a new page, may wehave just run out * Maybe we need a new page, maybe we have just run out
*/ */
int avail = (char*)argp->end - (char*)argp->p; int avail = (char*)argp->end - (char*)argp->p;
u32 *p; u32 *p;
if (avail + argp->pagelen < nbytes) if (avail + argp->pagelen < nbytes)
return NULL; return NULL;
if (avail + PAGE_SIZE > nbytes) /* need more than a page !! */ if (avail + PAGE_SIZE < nbytes) /* need more than a page !! */
return NULL; return NULL;
/* ok, we can do it with the tail plus the next page */ /* ok, we can do it with the current plus the next page */
if (nbytes <= sizeof(argp->tmp)) if (nbytes <= sizeof(argp->tmp))
p = argp->tmp; p = argp->tmp;
else { else {
...@@ -851,16 +852,16 @@ nfsd4_decode_write(struct nfsd4_compoundargs *argp, struct nfsd4_write *write) ...@@ -851,16 +852,16 @@ nfsd4_decode_write(struct nfsd4_compoundargs *argp, struct nfsd4_write *write)
v++; v++;
write->wr_vec[v].iov_base = page_address(argp->pagelist[0]); write->wr_vec[v].iov_base = page_address(argp->pagelist[0]);
argp->pagelist++; argp->pagelist++;
if (argp->pagelen >= PAGE_SIZE) { if (len >= PAGE_SIZE) {
write->wr_vec[v].iov_len = PAGE_SIZE; write->wr_vec[v].iov_len = PAGE_SIZE;
argp->pagelen -= PAGE_SIZE; argp->pagelen -= PAGE_SIZE;
} else { } else {
write->wr_vec[v].iov_len = argp->pagelen; write->wr_vec[v].iov_len = argp->pagelen;
argp->pagelen = 0; argp->pagelen -= len;
} }
} }
argp->end = (u32*) (write->wr_vec[v].iov_base + write->wr_vec[v].iov_len); argp->end = (u32*) (write->wr_vec[v].iov_base + write->wr_vec[v].iov_len);
argp->p = (u32*) (write->wr_vec[v].iov_base + len); argp->p = (u32*) (write->wr_vec[v].iov_base + (XDR_QUADLEN(len) << 2));
write->wr_vec[v].iov_len = len; write->wr_vec[v].iov_len = len;
write->wr_vlen = v+1; write->wr_vlen = v+1;
...@@ -1690,7 +1691,8 @@ nfsd4_encode_read(struct nfsd4_compoundres *resp, int nfserr, struct nfsd4_read ...@@ -1690,7 +1691,8 @@ nfsd4_encode_read(struct nfsd4_compoundres *resp, int nfserr, struct nfsd4_read
{ {
u32 eof; u32 eof;
int v, pn; int v, pn;
unsigned long maxcount, len; unsigned long maxcount;
long len;
ENCODE_HEAD; ENCODE_HEAD;
if (nfserr) if (nfserr)
...@@ -1731,6 +1733,10 @@ nfsd4_encode_read(struct nfsd4_compoundres *resp, int nfserr, struct nfsd4_read ...@@ -1731,6 +1733,10 @@ nfsd4_encode_read(struct nfsd4_compoundres *resp, int nfserr, struct nfsd4_read
resp->xbuf->page_len = maxcount; resp->xbuf->page_len = maxcount;
/* read zero bytes -> don't set up tail */
if(!maxcount)
return 0;
/* set up page for remaining responses */ /* set up page for remaining responses */
svc_take_page(resp->rqstp); svc_take_page(resp->rqstp);
resp->xbuf->tail[0].iov_base = resp->xbuf->tail[0].iov_base =
......
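One of the nfsd4 fixes above advances argp->p by XDR_QUADLEN(len) << 2 bytes rather than by len, because XDR opaque data is padded out to a 4-byte boundary. A small userspace demo of that rounding (the macro matches the sunrpc definition; everything else is just scaffolding for the printout):

    #include <stdio.h>

    /* length in 4-byte XDR quads, as in the sunrpc headers */
    #define XDR_QUADLEN(l)  (((l) + 3) >> 2)

    int main(void)
    {
        unsigned long lens[] = { 0, 1, 4, 5, 4096, 4097 };
        for (unsigned i = 0; i < sizeof(lens) / sizeof(lens[0]); i++) {
            unsigned long len = lens[i];
            /* bytes the decode pointer must skip: len rounded up to 4 */
            printf("len=%5lu  padded=%5lu\n", len,
                   (unsigned long)(XDR_QUADLEN(len) << 2));
        }
        return 0;
    }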
...@@ -221,12 +221,17 @@ asmlinkage long sys_quotactl(unsigned int cmd, const char *special, qid_t id, ca ...@@ -221,12 +221,17 @@ asmlinkage long sys_quotactl(unsigned int cmd, const char *special, qid_t id, ca
uint cmds, type; uint cmds, type;
struct super_block *sb = NULL; struct super_block *sb = NULL;
struct block_device *bdev; struct block_device *bdev;
char *tmp;
int ret = -ENODEV; int ret = -ENODEV;
cmds = cmd >> SUBCMDSHIFT; cmds = cmd >> SUBCMDSHIFT;
type = cmd & SUBCMDMASK; type = cmd & SUBCMDMASK;
bdev = lookup_bdev(special); tmp = getname(special);
if (IS_ERR(tmp))
return PTR_ERR(tmp);
bdev = lookup_bdev(tmp);
putname(tmp);
if (IS_ERR(bdev)) if (IS_ERR(bdev))
return PTR_ERR(bdev); return PTR_ERR(bdev);
sb = get_super(bdev); sb = get_super(bdev);
......
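The quotactl hunk brackets lookup_bdev() with getname()/putname() so that the user-supplied path is copied into kernel memory before it is used, and released on every path out. A userspace analogue of that copy-check-use-release shape (copy_user_path, lookup_device and do_quota_op are invented; only the ordering mirrors the patch):

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static char *copy_user_path(const char *src)
    {
        return src ? strdup(src) : NULL;    /* copy before any use */
    }

    static int lookup_device(const char *path)
    {
        /* pretend only this one path resolves */
        return strcmp(path, "/dev/hda1") == 0 ? 0 : -ENODEV;
    }

    static int do_quota_op(const char *user_path)
    {
        char *tmp = copy_user_path(user_path);
        int ret;

        if (!tmp)
            return -EFAULT;
        ret = lookup_device(tmp);           /* use only the copy */
        free(tmp);                          /* release in all cases */
        return ret;
    }

    int main(void)
    {
        printf("%d %d\n", do_quota_op("/dev/hda1"), do_quota_op("/dev/bogus"));
        return 0;
    }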
...@@ -26,6 +26,9 @@ ...@@ -26,6 +26,9 @@
static __inline__ int ide_default_irq(unsigned long base) static __inline__ int ide_default_irq(unsigned long base)
{ {
switch (base) { switch (base) {
#ifdef CONFIG_X86_PC9800
case 0x640: return 9;
#endif
case 0x1f0: return 14; case 0x1f0: return 14;
case 0x170: return 15; case 0x170: return 15;
case 0x1e8: return 11; case 0x1e8: return 11;
...@@ -40,12 +43,17 @@ static __inline__ int ide_default_irq(unsigned long base) ...@@ -40,12 +43,17 @@ static __inline__ int ide_default_irq(unsigned long base)
static __inline__ unsigned long ide_default_io_base(int index) static __inline__ unsigned long ide_default_io_base(int index)
{ {
switch (index) { switch (index) {
#ifdef CONFIG_X86_PC9800
case 0:
case 1: return 0x640;
#else
case 0: return 0x1f0; case 0: return 0x1f0;
case 1: return 0x170; case 1: return 0x170;
case 2: return 0x1e8; case 2: return 0x1e8;
case 3: return 0x168; case 3: return 0x168;
case 4: return 0x1e0; case 4: return 0x1e0;
case 5: return 0x160; case 5: return 0x160;
#endif
default: default:
return 0; return 0;
} }
...@@ -56,13 +64,24 @@ static __inline__ void ide_init_hwif_ports(hw_regs_t *hw, unsigned long data_por ...@@ -56,13 +64,24 @@ static __inline__ void ide_init_hwif_ports(hw_regs_t *hw, unsigned long data_por
{ {
unsigned long reg = data_port; unsigned long reg = data_port;
int i; int i;
#ifdef CONFIG_X86_PC9800
unsigned long increment = data_port == 0x640 ? 2 : 1;
#endif
for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++) { for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++) {
hw->io_ports[i] = reg; hw->io_ports[i] = reg;
#ifdef CONFIG_X86_PC9800
reg += increment;
#else
reg += 1; reg += 1;
#endif
} }
if (ctrl_port) { if (ctrl_port) {
hw->io_ports[IDE_CONTROL_OFFSET] = ctrl_port; hw->io_ports[IDE_CONTROL_OFFSET] = ctrl_port;
#ifdef CONFIG_X86_PC9800
} else if (data_port == 0x640) {
hw->io_ports[IDE_CONTROL_OFFSET] = 0x74c;
#endif
} else { } else {
hw->io_ports[IDE_CONTROL_OFFSET] = hw->io_ports[IDE_DATA_OFFSET] + 0x206; hw->io_ports[IDE_CONTROL_OFFSET] = hw->io_ports[IDE_DATA_OFFSET] + 0x206;
} }
......
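On PC-9800 the on-board IDE task-file registers sit at 0x640 with a two-byte stride instead of one, and the control register lives at the fixed port 0x74c. A small userspace sketch that prints the port layout the hunk sets up (the constants come from the hunk; DATA/STATUS/CONTROL offsets 0, 7 and 8 mirror the io_ports[] layout, and map_ports is an invented name):

    #include <stdio.h>

    enum { DATA = 0, STATUS = 7, CONTROL = 8, NPORTS = 10 };

    static void map_ports(unsigned long base, unsigned long ctrl, unsigned long *io)
    {
        /* PC-9800's IDE at 0x640 spaces its registers two bytes apart */
        unsigned long step = (base == 0x640) ? 2 : 1;
        unsigned long reg = base;

        for (int i = DATA; i <= STATUS; i++, reg += step)
            io[i] = reg;

        if (ctrl)
            io[CONTROL] = ctrl;
        else if (base == 0x640)
            io[CONTROL] = 0x74c;            /* fixed control port on PC-9800 */
        else
            io[CONTROL] = io[DATA] + 0x206; /* classic AT layout */
    }

    int main(void)
    {
        unsigned long io[NPORTS];
        map_ports(0x640, 0, io);
        for (int i = DATA; i <= CONTROL; i++)
            printf("io_ports[%d] = 0x%lx\n", i, io[i]);
        return 0;
    }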
...@@ -510,9 +510,9 @@ long __strncpy_from_user(char *dst, const char *src, long count); ...@@ -510,9 +510,9 @@ long __strncpy_from_user(char *dst, const char *src, long count);
* *
* Context: User context only. This function may sleep. * Context: User context only. This function may sleep.
* *
* Get the size of a NULL-terminated string in user space. * Get the size of a NUL-terminated string in user space.
* *
* Returns the size of the string INCLUDING the terminating NULL. * Returns the size of the string INCLUDING the terminating NUL.
* On exception, returns 0. * On exception, returns 0.
* *
* If there is a limit on the length of a valid string, you may wish to * If there is a limit on the length of a valid string, you may wish to
......
...@@ -51,7 +51,7 @@ extern const char _sb_findmap[]; ...@@ -51,7 +51,7 @@ extern const char _sb_findmap[];
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
/* /*
* SMP save set_bit routine based on compare and swap (CS) * SMP safe set_bit routine based on compare and swap (CS)
*/ */
static inline void set_bit_cs(int nr, volatile unsigned long *ptr) static inline void set_bit_cs(int nr, volatile unsigned long *ptr)
{ {
...@@ -76,7 +76,7 @@ static inline void set_bit_cs(int nr, volatile unsigned long *ptr) ...@@ -76,7 +76,7 @@ static inline void set_bit_cs(int nr, volatile unsigned long *ptr)
} }
/* /*
* SMP save clear_bit routine based on compare and swap (CS) * SMP safe clear_bit routine based on compare and swap (CS)
*/ */
static inline void clear_bit_cs(int nr, volatile unsigned long *ptr) static inline void clear_bit_cs(int nr, volatile unsigned long *ptr)
{ {
...@@ -101,7 +101,7 @@ static inline void clear_bit_cs(int nr, volatile unsigned long *ptr) ...@@ -101,7 +101,7 @@ static inline void clear_bit_cs(int nr, volatile unsigned long *ptr)
} }
/* /*
* SMP save change_bit routine based on compare and swap (CS) * SMP safe change_bit routine based on compare and swap (CS)
*/ */
static inline void change_bit_cs(int nr, volatile unsigned long *ptr) static inline void change_bit_cs(int nr, volatile unsigned long *ptr)
{ {
...@@ -126,7 +126,7 @@ static inline void change_bit_cs(int nr, volatile unsigned long *ptr) ...@@ -126,7 +126,7 @@ static inline void change_bit_cs(int nr, volatile unsigned long *ptr)
} }
/* /*
* SMP save test_and_set_bit routine based on compare and swap (CS) * SMP safe test_and_set_bit routine based on compare and swap (CS)
*/ */
static inline int static inline int
test_and_set_bit_cs(int nr, volatile unsigned long *ptr) test_and_set_bit_cs(int nr, volatile unsigned long *ptr)
...@@ -153,7 +153,7 @@ test_and_set_bit_cs(int nr, volatile unsigned long *ptr) ...@@ -153,7 +153,7 @@ test_and_set_bit_cs(int nr, volatile unsigned long *ptr)
} }
/* /*
* SMP save test_and_clear_bit routine based on compare and swap (CS) * SMP safe test_and_clear_bit routine based on compare and swap (CS)
*/ */
static inline int static inline int
test_and_clear_bit_cs(int nr, volatile unsigned long *ptr) test_and_clear_bit_cs(int nr, volatile unsigned long *ptr)
...@@ -180,7 +180,7 @@ test_and_clear_bit_cs(int nr, volatile unsigned long *ptr) ...@@ -180,7 +180,7 @@ test_and_clear_bit_cs(int nr, volatile unsigned long *ptr)
} }
/* /*
* SMP save test_and_change_bit routine based on compare and swap (CS) * SMP safe test_and_change_bit routine based on compare and swap (CS)
*/ */
static inline int static inline int
test_and_change_bit_cs(int nr, volatile unsigned long *ptr) test_and_change_bit_cs(int nr, volatile unsigned long *ptr)
......
...@@ -254,6 +254,7 @@ struct mddev_s ...@@ -254,6 +254,7 @@ struct mddev_s
struct mdk_personality_s struct mdk_personality_s
{ {
char *name; char *name;
struct module *owner;
int (*make_request)(request_queue_t *q, struct bio *bio); int (*make_request)(request_queue_t *q, struct bio *bio);
int (*run)(mddev_t *mddev); int (*run)(mddev_t *mddev);
int (*stop)(mddev_t *mddev); int (*stop)(mddev_t *mddev);
......
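Adding a struct module *owner to mdk_personality_s follows the usual pattern of recording which module provides a table of method pointers, so the module's use count can be held while those methods are in use. A toy userspace sketch of that ownership idea (struct module, module_get and module_put here are invented stand-ins, not the kernel API):

    #include <stdio.h>

    /* toy "module" whose use count must be held while its ops are called */
    struct module { int usecount; const char *name; };

    struct personality {
        const char *name;
        struct module *owner;               /* who provides these methods */
        int (*run)(void);
    };

    static int demo_run(void) { return 0; }
    static int module_get(struct module *m) { m->usecount++; return 1; }
    static void module_put(struct module *m) { m->usecount--; }

    int main(void)
    {
        struct module raid_mod = { 0, "raid-demo" };
        struct personality pers = { "raid-demo", &raid_mod, demo_run };

        if (module_get(pers.owner)) {       /* pin the provider before use */
            pers.run();
            module_put(pers.owner);         /* release once done */
        }
        printf("usecount now %d\n", raid_mod.usecount);
        return 0;
    }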