Commit ad866e3a authored by Linus Torvalds

Merge http://gkernel.bkbits.net/net-drivers-2.5

into home.transmeta.com:/home/torvalds/v2.5/linux
parents 011d6325 10315105
......@@ -974,8 +974,6 @@ void __init stram_swap_setup(char *str, int *ints)
/* ST-RAM device */
/* ------------------------------------------------------------------------ */
static int stram_blocksizes[14] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4096 };
static int stram_sizes[14] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
static int refcnt = 0;
......@@ -1064,7 +1062,6 @@ int __init stram_device_init(void)
}
blk_init_queue(BLK_DEFAULT_QUEUE(STRAM_MAJOR), do_stram_request);
blksize_size[STRAM_MAJOR] = stram_blocksizes;
stram_sizes[STRAM_MINOR] = (swap_end - swap_start)/1024;
blk_size[STRAM_MAJOR] = stram_sizes;
register_disk(NULL, MKDEV(STRAM_MAJOR, STRAM_MINOR), 1, &stram_fops,
......
......@@ -202,7 +202,6 @@ static struct archy_disk_type {
#define MAX_DISK_SIZE 720
static int floppy_sizes[256];
static int floppy_blocksizes[256];
/* current info on each unit */
static struct archy_floppy_struct {
......@@ -1614,7 +1613,6 @@ int fd1772_init(void)
floppy_sizes[i] = MAX_DISK_SIZE;
blk_size[MAJOR_NR] = floppy_sizes;
blksize_size[MAJOR_NR] = floppy_blocksizes;
blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), do_fd_request);
config_types();
......
......@@ -195,7 +195,6 @@ struct mfm_info {
static struct hd_struct mfm[MFM_MAXDRIVES << 6];
static int mfm_sizes[MFM_MAXDRIVES << 6];
static int mfm_blocksizes[MFM_MAXDRIVES << 6];
static DECLARE_WAIT_QUEUE_HEAD(mfm_wait_open);
/* Stuff from the assembly routines */
......@@ -1315,12 +1314,6 @@ static void mfm_geninit (void)
{
int i;
for (i = 0; i < (MFM_MAXDRIVES << 6); i++) {
/* Can't increase this - if you do all hell breaks loose */
mfm_blocksizes[i] = 1024;
}
blksize_size[MAJOR_NR] = mfm_blocksizes;
mfm_drives = mfm_initdrives();
printk("mfm: detected %d hard drive%s\n", mfm_drives,
......
......@@ -1962,7 +1962,6 @@ static boolean DAC960_RegisterBlockDevice(DAC960_Controller_T *Controller)
Controller->MaxBlocksPerCommand;
Controller->GenericDiskInfo.part = Controller->DiskPartitions;
Controller->GenericDiskInfo.sizes = Controller->PartitionSizes;
blksize_size[MajorNumber] = Controller->BlockSizes;
/*
Complete initialization of the Generic Disk Information structure.
*/
......@@ -2044,10 +2043,6 @@ static void DAC960_ComputeGenericDiskInfo(DAC960_Controller_T *Controller)
LogicalDeviceInfo->ConfigurableDeviceSize;
else GenericDiskInfo->part[MinorNumber].nr_sects = 0;
}
for (i = 0; i < DAC960_MaxPartitions; i++)
if (GenericDiskInfo->part[MinorNumber].nr_sects > 0)
Controller->BlockSizes[MinorNumber + i] = BLOCK_SIZE;
else Controller->BlockSizes[MinorNumber + i] = 0;
}
}
......
......@@ -2474,7 +2474,6 @@ typedef struct DAC960_Controller
} FW;
DiskPartition_T DiskPartitions[DAC960_MinorCount];
int PartitionSizes[DAC960_MinorCount];
int BlockSizes[DAC960_MinorCount];
unsigned char ProgressBuffer[DAC960_ProgressBufferSize];
unsigned char UserStatusBuffer[DAC960_UserMessageSize];
}
......
......@@ -243,7 +243,6 @@ unsigned long phys_acsi_buffer;
static int NDevices = 0;
static int acsi_sizes[MAX_DEV<<4] = { 0, };
static int acsi_blocksizes[MAX_DEV<<4] = { 0, };
static struct hd_struct acsi_part[MAX_DEV<<4] = { {0,0}, };
static int access_count[MAX_DEV] = { 0, };
static char busy[MAX_DEV] = { 0, };
......@@ -1738,9 +1737,6 @@ static void acsi_geninit(void)
NDevices, n_slm );
#endif
for( i = 0; i < (MAX_DEV << 4); i++ )
acsi_blocksizes[i] = 1024;
blksize_size[MAJOR_NR] = acsi_blocksizes;
for( i = 0; i < NDevices; ++i )
register_disk(&acsi_gendisk, mk_kdev(MAJOR_NR,i<<4),
(acsi_info[i].type==HARDDISK)?1<<4:1,
......
......@@ -141,7 +141,6 @@ static int num_dr_types = sizeof(drive_types) / sizeof(drive_types[0]);
/* defaults for 3 1/2" HD-Disks */
static int floppy_sizes[256]={880,880,880,880,720,720,720,720,};
static int floppy_blocksizes[256];
/* hardsector size assumed to be 512 */
static int amiga_read(int), dos_read(int);
......@@ -1858,7 +1857,6 @@ int __init amiga_floppy_init(void)
post_write_timer.function = post_write;
blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), do_fd_request, &amiflop_lock);
blksize_size[MAJOR_NR] = floppy_blocksizes;
blk_size[MAJOR_NR] = floppy_sizes;
for (i = 0; i < 128; i++)
......
......@@ -220,7 +220,6 @@ static struct atari_disk_type user_params[FD_MAX_UNITS];
static struct atari_disk_type default_params[FD_MAX_UNITS];
static int floppy_sizes[256];
static int floppy_blocksizes[256];
/* current info on each unit */
static struct atari_floppy_struct {
......@@ -2012,7 +2011,6 @@ int __init atari_floppy_init (void)
floppy_sizes[i] = MAX_DISK_SIZE;
blk_size[MAJOR_NR] = floppy_sizes;
blksize_size[MAJOR_NR] = floppy_blocksizes;
blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), do_fd_request, &ataflop_lock);
printk(KERN_INFO "Atari floppy driver: max. %cD, %strack buffering\n",
......
......@@ -241,7 +241,7 @@ int blk_ioctl(struct block_device *bdev, unsigned int cmd, unsigned long arg)
case BLKFRASET:
if(!capable(CAP_SYS_ADMIN))
return -EACCES;
ra_pages = blk_get_ra_pages(dev);
ra_pages = blk_get_ra_pages(bdev);
if (ra_pages == NULL)
return -ENOTTY;
*ra_pages = (arg * 512) / PAGE_CACHE_SIZE;
......@@ -251,7 +251,7 @@ int blk_ioctl(struct block_device *bdev, unsigned int cmd, unsigned long arg)
case BLKFRAGET:
if (!arg)
return -EINVAL;
ra_pages = blk_get_ra_pages(dev);
ra_pages = blk_get_ra_pages(bdev);
if (ra_pages == NULL)
return -ENOTTY;
return put_user((*ra_pages * PAGE_CACHE_SIZE) / 512,
......@@ -304,7 +304,7 @@ int blk_ioctl(struct block_device *bdev, unsigned int cmd, unsigned long arg)
case BLKBSZGET:
/* get the logical block size (cf. BLKSSZGET) */
intval = block_size(dev);
intval = block_size(bdev);
return put_user(intval, (int *) arg);
case BLKBSZSET:
......@@ -320,7 +320,7 @@ int blk_ioctl(struct block_device *bdev, unsigned int cmd, unsigned long arg)
return -EINVAL;
if (bd_claim(bdev, &holder) < 0)
return -EBUSY;
set_blocksize(dev, intval);
set_blocksize(bdev, intval);
bd_release(bdev);
return 0;
......
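Seen from user space these ioctls are unchanged; only the in-kernel lookup now takes the block_device. A minimal round-trip sketch (the device path and a 4 kB PAGE_CACHE_SIZE are assumptions, not taken from this diff):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/fs.h>			/* BLKFRASET, BLKFRAGET */

int main(void)
{
	int fd = open("/dev/hda", O_RDONLY);	/* any block node will do */
	long ra;

	if (fd < 0)
		return 1;
	/* needs CAP_SYS_ADMIN; 256 sectors -> (256*512)/4096 = 32 pages */
	ioctl(fd, BLKFRASET, 256);
	/* reads back (32*4096)/512 = 256; non-multiples round down */
	ioctl(fd, BLKFRAGET, &ra);
	printf("file readahead: %ld sectors\n", ra);
	return 0;
}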
......@@ -349,11 +349,6 @@ static void cciss_geninit( int ctlr)
continue;
hba[ctlr]->hd[i << NWD_SHIFT].nr_sects =
hba[ctlr]->sizes[i << NWD_SHIFT] = drv->nr_blocks;
/* for each partition */
for(j=0; j<MAX_PART; j++)
hba[ctlr]->blocksizes[(i<<NWD_SHIFT) + j] = 1024;
//hba[ctlr]->gendisk.nr_real++;
(BLK_DEFAULT_QUEUE(MAJOR_NR + ctlr))->hardsect_size = drv->block_size;
}
......@@ -834,7 +829,6 @@ static int revalidate_allvol(kdev_t dev)
*/
memset(hba[ctlr]->hd, 0, sizeof(struct hd_struct) * 256);
memset(hba[ctlr]->sizes, 0, sizeof(int) * 256);
memset(hba[ctlr]->blocksizes, 0, sizeof(int) * 256);
memset(hba[ctlr]->drv, 0, sizeof(drive_info_struct)
* CISS_MAX_LUN);
hba[ctlr]->gendisk.nr_real = 0;
......@@ -1343,9 +1337,6 @@ static int register_new_disk(kdev_t dev, int ctlr)
invalidate_device(kdev, 1);
gdev->part[minor].start_sect = 0;
gdev->part[minor].nr_sects = 0;
/* reset the blocksize so we can read the partition table */
blksize_size[MAJOR_NR+ctlr][minor] = 1024;
}
++hba[ctlr]->num_luns;
......@@ -2540,9 +2531,6 @@ static int __init cciss_init_one(struct pci_dev *pdev,
blk_queue_max_sectors(q, 512);
/* fill in the other Kernel structs */
blksize_size[MAJOR_NR+i] = hba[i]->blocksizes;
/* Fill in the gendisk data */
hba[i]->gendisk.major = MAJOR_NR + i;
hba[i]->gendisk.major_name = "cciss";
......
......@@ -85,7 +85,6 @@ struct ctlr_info
// indexed by minor numbers
struct hd_struct hd[256];
int sizes[256];
int blocksizes[256];
#ifdef CONFIG_CISS_SCSI_TAPE
void *scsi_ctlr; /* ptr to structure containing scsi related stuff */
#endif
......
......@@ -102,7 +102,6 @@ static struct board_type products[] = {
static struct hd_struct * ida;
static int * ida_sizes;
static int * ida_blocksizes;
static struct gendisk ida_gendisk[MAX_CTLR];
static struct proc_dir_entry *proc_array;
......@@ -178,10 +177,6 @@ static void ida_geninit(int ctlr)
ida_sizes[(ctlr<<CTLR_SHIFT) + (i<<NWD_SHIFT)] =
drv->nr_blks;
for(j=0; j<16; j++)
ida_blocksizes[(ctlr<<CTLR_SHIFT) + (i<<NWD_SHIFT)+j] =
1024;
(BLK_DEFAULT_QUEUE(MAJOR_NR + ctlr))->hardsect_size = drv->blk_size;
ida_gendisk[ctlr].nr_real++;
}
......@@ -343,7 +338,6 @@ void cleanup_module(void)
remove_proc_entry("cpqarray", proc_root_driver);
kfree(ida);
kfree(ida_sizes);
kfree(ida_blocksizes);
}
#endif /* MODULE */
......@@ -384,18 +378,8 @@ int __init cpqarray_init(void)
return(num_cntlrs_reg);
}
ida_blocksizes = kmalloc(sizeof(int)*nr_ctlr*NWD*16, GFP_KERNEL);
if(ida_blocksizes==NULL)
{
kfree(ida);
kfree(ida_sizes);
printk( KERN_ERR "cpqarray: out of memory");
return(num_cntlrs_reg);
}
memset(ida, 0, sizeof(struct hd_struct)*nr_ctlr*NWD*16);
memset(ida_sizes, 0, sizeof(int)*nr_ctlr*NWD*16);
memset(ida_blocksizes, 0, sizeof(int)*nr_ctlr*NWD*16);
memset(ida_gendisk, 0, sizeof(struct gendisk)*MAX_CTLR);
/*
......@@ -453,7 +437,6 @@ int __init cpqarray_init(void)
{
kfree(ida);
kfree(ida_sizes);
kfree(ida_blocksizes);
}
return(num_cntlrs_reg);
......@@ -481,8 +464,6 @@ int __init cpqarray_init(void)
/* This is a driver limit and could be eliminated. */
blk_queue_max_phys_segments(q, SG_MAX);
blksize_size[MAJOR_NR+i] = ida_blocksizes + (i*256);
ida_gendisk[i].major = MAJOR_NR + i;
ida_gendisk[i].major_name = "ida";
ida_gendisk[i].minor_shift = NWD_SHIFT;
......@@ -1512,7 +1493,6 @@ static int revalidate_allvol(kdev_t dev)
*/
memset(ida+(ctlr*256), 0, sizeof(struct hd_struct)*NWD*16);
memset(ida_sizes+(ctlr*256), 0, sizeof(int)*NWD*16);
memset(ida_blocksizes+(ctlr*256), 0, sizeof(int)*NWD*16);
memset(hba[ctlr]->drv, 0, sizeof(drv_info_t)*NWD);
ida_gendisk[ctlr].nr_real = 0;
......
......@@ -478,7 +478,6 @@ static struct floppy_struct *current_type[N_DRIVE];
static struct floppy_struct user_params[N_DRIVE];
static int floppy_sizes[256];
static int floppy_blocksizes[256];
/*
* The driver is trying to determine the correct media format
......@@ -3881,6 +3880,12 @@ static int floppy_revalidate(kdev_t dev)
if (cf)
UDRS->generation++;
if (NO_GEOM){
#if 0
/*
* What the devil is going on here? We are not guaranteed to do
* any IO and ENXIO case is nothing but ENOMEM in disguise - it
* happens if and only if buffer cache is out of memory. WTF?
*/
/* auto-sensing */
int size = floppy_blocksizes[minor(dev)];
if (!size)
......@@ -3895,6 +3900,9 @@ static int floppy_revalidate(kdev_t dev)
wait_on_buffer(bh);
brelse(bh);
return 0;
#endif
process_fd_request();
return 0;
}
if (cf)
poll_drive(0, FD_RAW_NEED_DISK);
......@@ -4183,7 +4191,6 @@ int __init floppy_init(void)
floppy_sizes[i] = MAX_DISK_SIZE;
blk_size[MAJOR_NR] = floppy_sizes;
blksize_size[MAJOR_NR] = floppy_blocksizes;
blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), do_fd_request, &floppy_lock);
reschedule_timeout(MAXTIMEOUT, "floppy init", MAXTIMEOUT);
config_types();
......
......@@ -70,15 +70,6 @@ struct blk_dev_struct blk_dev[MAX_BLKDEV]; /* initialized by blk_dev_init() */
*/
int * blk_size[MAX_BLKDEV];
/*
* blksize_size contains the size of all block-devices:
*
* blksize_size[MAJOR][MINOR]
*
* if (!blksize_size[MAJOR]) then 1024 bytes is assumed.
*/
int * blksize_size[MAX_BLKDEV];
/*
* How many requests do we allocate per queue,
* and how many do we "batch" on freeing them?
......@@ -117,10 +108,10 @@ inline request_queue_t *blk_get_queue(kdev_t dev)
*
* Will return NULL if the request queue cannot be located.
*/
unsigned long *blk_get_ra_pages(kdev_t dev)
unsigned long *blk_get_ra_pages(struct block_device *bdev)
{
unsigned long *ret = NULL;
request_queue_t *q = blk_get_queue(dev);
request_queue_t *q = blk_get_queue(to_kdev_t(bdev->bd_dev));
if (q)
ret = &q->ra_pages;
......
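With the blksize_size[] table gone, the per-device soft block size lives in struct block_device itself. A hedged sketch of the replacement lookup (the helper name is invented for illustration; bd_block_size is the field the rest of this diff converts drivers to):

/* illustrative only: blksize_size[MAJOR][MINOR] defaulted to 1024 bytes
 * when unset; bdev->bd_block_size now plays that role */
static inline unsigned int soft_block_size(struct block_device *bdev)
{
	return bdev ? bdev->bd_block_size : BLOCK_SIZE;	/* BLOCK_SIZE == 1024 */
}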
......@@ -81,7 +81,6 @@
static int max_loop = 8;
static struct loop_device *loop_dev;
static int *loop_sizes;
static int *loop_blksizes;
static devfs_handle_t devfs_handle; /* For the directory */
/*
......@@ -284,7 +283,7 @@ static int lo_receive(struct loop_device *lo, struct bio *bio, int bsize, loff_t
static inline int loop_get_bs(struct loop_device *lo)
{
return block_size(to_kdev_t(lo->lo_device->bd_dev));
return block_size(lo->lo_device);
}
static inline unsigned long loop_get_iv(struct loop_device *lo,
......@@ -649,7 +648,7 @@ static int loop_set_fd(struct loop_device *lo, struct file *lo_file,
lo->old_gfp_mask = inode->i_mapping->gfp_mask;
inode->i_mapping->gfp_mask = GFP_NOIO;
set_blocksize(dev, block_size(to_kdev_t(lo_device->bd_dev)));
set_blocksize(bdev, block_size(lo_device));
lo->lo_bio = lo->lo_biotail = NULL;
kernel_thread(loop_thread, lo, CLONE_FS | CLONE_FILES | CLONE_SIGHAND);
......@@ -989,10 +988,6 @@ int __init loop_init(void)
if (!loop_sizes)
goto out_mem;
loop_blksizes = kmalloc(max_loop * sizeof(int), GFP_KERNEL);
if (!loop_blksizes)
goto out_mem;
blk_queue_make_request(BLK_DEFAULT_QUEUE(MAJOR_NR), loop_make_request);
blk_queue_bounce_limit(BLK_DEFAULT_QUEUE(MAJOR_NR), BLK_BOUNCE_HIGH);
......@@ -1007,9 +1002,7 @@ int __init loop_init(void)
}
memset(loop_sizes, 0, max_loop * sizeof(int));
memset(loop_blksizes, 0, max_loop * sizeof(int));
blk_size[MAJOR_NR] = loop_sizes;
blksize_size[MAJOR_NR] = loop_blksizes;
for (i = 0; i < max_loop; i++)
register_disk(NULL, mk_kdev(MAJOR_NR, i), 1, &lo_fops, 0);
......@@ -1031,7 +1024,6 @@ void loop_exit(void)
kfree(loop_dev);
kfree(loop_sizes);
kfree(loop_blksizes);
}
module_init(loop_init);
......
......@@ -518,7 +518,6 @@ static int __init nbd_init(void)
#ifdef MODULE
printk("nbd: registered device at major %d\n", MAJOR_NR);
#endif
blksize_size[MAJOR_NR] = nbd_blksizes;
blk_size[MAJOR_NR] = nbd_sizes;
blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), do_nbd_request, &nbd_lock);
for (i = 0; i < MAX_NBD; i++) {
......
......@@ -223,8 +223,6 @@ static void do_pcd_read_drq(void);
static void do_pcd_request(request_queue_t * q);
static void do_pcd_read(void);
static int pcd_blocksizes[PCD_UNITS];
struct pcd_unit {
struct pi_adapter pia; /* interface to paride layer */
struct pi_adapter *pi;
......@@ -357,9 +355,6 @@ int pcd_init (void) /* preliminary initialisation */
blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), do_pcd_request, &pcd_lock);
for (i=0;i<PCD_UNITS;i++) pcd_blocksizes[i] = 1024;
blksize_size[MAJOR_NR] = pcd_blocksizes;
return 0;
}
......
......@@ -286,7 +286,6 @@ static void pd_eject( int unit);
static struct hd_struct pd_hd[PD_DEVS];
static int pd_sizes[PD_DEVS];
static int pd_blocksizes[PD_DEVS];
#define PD_NAMELEN 8
......@@ -400,9 +399,6 @@ int pd_init (void)
pd_gendisk.major_name = name;
add_gendisk(&pd_gendisk);
for(i=0;i<PD_DEVS;i++) pd_blocksizes[i] = 1024;
blksize_size[MAJOR_NR] = pd_blocksizes;
printk("%s: %s version %s, major %d, cluster %d, nice %d\n",
name,name,PD_VERSION,major,cluster,nice);
pd_init_units();
......
......@@ -265,8 +265,6 @@ static void pf_lock(int unit, int func);
static void pf_eject(int unit);
static int pf_check_media(kdev_t dev);
static int pf_blocksizes[PF_UNITS];
#define PF_NM 0
#define PF_RO 1
#define PF_RW 2
......@@ -362,8 +360,6 @@ int pf_init (void) /* preliminary initialisation */
blk_queue_max_phys_segments(q, cluster);
blk_queue_max_hw_segments(q, cluster);
for (i=0;i<PF_UNITS;i++) pf_blocksizes[i] = 1024;
blksize_size[MAJOR_NR] = pf_blocksizes;
for (i=0;i<PF_UNITS;i++)
register_disk(NULL, mk_kdev(MAJOR_NR, i), 1, &pf_fops, 0);
......
......@@ -115,7 +115,6 @@ static int no_int_yet;
static int access_count[MAX_HD];
static char ps2esdi_valid[MAX_HD];
static int ps2esdi_sizes[MAX_HD << 6];
static int ps2esdi_blocksizes[MAX_HD << 6];
static int ps2esdi_drives;
static struct hd_struct ps2esdi[MAX_HD << 6];
static u_short io_base;
......@@ -413,12 +412,8 @@ static void __init ps2esdi_geninit(void)
ps2esdi_gendisk.nr_real = ps2esdi_drives;
for (i = 0; i < (MAX_HD << 6); i++)
ps2esdi_blocksizes[i] = 1024;
request_dma(dma_arb_level, "ed");
request_region(io_base, 4, "ed");
blksize_size[MAJOR_NR] = ps2esdi_blocksizes;
blk_queue_max_sectors(BLK_DEFAULT_QUEUE(MAJOR_NR), 128);
for (i = 0; i < ps2esdi_drives; i++) {
......
......@@ -73,8 +73,6 @@ int initrd_below_start_ok;
*/
static unsigned long rd_length[NUM_RAMDISKS]; /* Size of RAM disks in bytes */
static int rd_hardsec[NUM_RAMDISKS]; /* Size of real blocks in bytes */
static int rd_blocksizes[NUM_RAMDISKS]; /* Size of 1024 byte blocks :) */
static int rd_kbsize[NUM_RAMDISKS]; /* Size in blocks of 1024 bytes */
static devfs_handle_t devfs_handle;
static struct block_device *rd_bdev[NUM_RAMDISKS];/* Protected device data */
......@@ -430,8 +428,6 @@ static int __init rd_init (void)
for (i = 0; i < NUM_RAMDISKS; i++) {
/* rd_size is given in kB */
rd_length[i] = rd_size << 10;
rd_hardsec[i] = rd_blocksize;
rd_blocksizes[i] = rd_blocksize;
rd_kbsize[i] = rd_size;
}
devfs_handle = devfs_mk_dir (NULL, "rd", NULL);
......@@ -451,7 +447,6 @@ static int __init rd_init (void)
INITRD_MINOR, S_IFBLK | S_IRUSR, &rd_bd_op, NULL);
#endif
blksize_size[MAJOR_NR] = rd_blocksizes; /* Avoid set_blocksize() check */
blk_size[MAJOR_NR] = rd_kbsize; /* Size of the RAM disk in kB */
/* rd_size is given in kB */
......
......@@ -35,7 +35,6 @@
#include <linux/blk.h>
#include <linux/devfs_fs_kernel.h>
static int floppy_blocksizes[2] = {512,512};
static int floppy_sizes[2] = {2880,2880};
#define MAX_FLOPPIES 2
......@@ -1034,7 +1033,6 @@ int swim3_init(void)
}
blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), do_fd_request,
&swim3_lock);
blksize_size[MAJOR_NR] = floppy_blocksizes;
blk_size[MAJOR_NR] = floppy_sizes;
}
......
......@@ -81,7 +81,6 @@ static int floppy_count;
static struct floppy_state floppy_states[MAX_FLOPPIES];
static int floppy_blocksizes[2] = {512,512};
static int floppy_sizes[2] = {2880,2880};
static spinlock_t swim_iop_lock = SPIN_LOCK_UNLOCKED;
......@@ -151,7 +150,6 @@ int swimiop_init(void)
}
blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), do_fd_request,
&swim_iop_lock);
blksize_size[MAJOR_NR] = floppy_blocksizes;
blk_size[MAJOR_NR] = floppy_sizes;
printk("SWIM-IOP: %s by Joshua M. Thompson (funaho@jurai.org)\n",
......
......@@ -120,7 +120,6 @@ static unsigned int xd_bases[] __initdata =
static struct hd_struct xd_struct[XD_MAXDRIVES << 6];
static int xd_sizes[XD_MAXDRIVES << 6], xd_access[XD_MAXDRIVES];
static int xd_blocksizes[XD_MAXDRIVES << 6];
static spinlock_t xd_lock = SPIN_LOCK_UNLOCKED;
......@@ -207,9 +206,6 @@ static void __init xd_geninit (void)
u_char i,controller;
unsigned int address;
for(i=0;i<(XD_MAXDRIVES << 6);i++) xd_blocksizes[i] = 1024;
blksize_size[MAJOR_NR] = xd_blocksizes;
if (xd_detect(&controller,&address)) {
printk("Detected a%s controller (type %d) at address %06x\n",
......
......@@ -61,7 +61,6 @@ extern struct mem_info m68k_memory[NUM_MEMINFO];
static u_long *z2ram_map = NULL;
static u_long z2ram_size = 0;
static int z2_blocksizes[Z2MINOR_COUNT];
static int z2_sizes[Z2MINOR_COUNT];
static int z2_count = 0;
static int chip_count = 0;
......@@ -361,13 +360,11 @@ z2_init( void )
int i;
for (i = 0; i < Z2MINOR_COUNT; i++) {
z2_blocksizes[ i ] = 1024;
z2_sizes[ i ] = 0;
}
}
blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST, &z2ram_lock);
blksize_size[ MAJOR_NR ] = z2_blocksizes;
blk_size[ MAJOR_NR ] = z2_sizes;
return 0;
......
......@@ -191,8 +191,6 @@
#include <asm/io.h>
#include <asm/uaccess.h>
static int aztcd_blocksizes[1] = { 2048 };
/*###########################################################################
Defines
......@@ -1926,7 +1924,7 @@ int __init aztcd_init(void)
return -EIO;
}
blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), do_aztcd_request, &aztSpin);
blksize_size[MAJOR_NR] = aztcd_blocksizes;
blk_queue_hardsect_size(BLK_DEFAULT_QUEUE(MAJOR_NR), 2048);
register_disk(NULL, mk_kdev(MAJOR_NR, 0), 1, &azt_fops, 0);
if ((azt_port == 0x1f0) || (azt_port == 0x170))
......
......@@ -3301,8 +3301,6 @@ __setup("cdu31a=", cdu31a_setup);
#endif
static int cdu31a_block_size;
/*
* Initialize the driver.
*/
......@@ -3442,9 +3440,6 @@ int __init cdu31a_init(void)
blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR),
do_cdu31a_request,
&cdu31a_lock);
cdu31a_block_size = 1024; /* 1kB default block size */
/* use 'mount -o block=2048' */
blksize_size[MAJOR_NR] = &cdu31a_block_size;
init_timer(&cdu31a_abort_timer);
cdu31a_abort_timer.function = handle_abort_timeout;
......
......@@ -261,8 +261,6 @@ struct toc_struct { /* private copy of Table of Contents */
uch track, fsm[3], q0;
};
static int cm206_blocksizes[1] = { 2048 };
struct cm206_struct {
volatile ush intr_ds; /* data status read on last interrupt */
volatile ush intr_ls; /* uart line status read on last interrupt */
......@@ -1502,7 +1500,7 @@ int __init cm206_init(void)
devfs_plain_cdrom(&cm206_info, &cm206_bdops);
blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), do_cm206_request,
&cm206_lock);
blksize_size[MAJOR_NR] = cm206_blocksizes;
blk_queue_hardsect_size(BLK_DEFAULT_QUEUE(MAJOR_NR), 2048);
init_bh(CM206_BH, cm206_bh);
memset(cd, 0, sizeof(*cd)); /* give'm some reasonable value */
......
......@@ -74,8 +74,6 @@
#define gscd_port gscd /* for compatible parameter passing with "insmod" */
#include "gscd.h"
static int gscd_blocksizes[1] = { 512 };
static int gscdPresent = 0;
static unsigned char gscd_buf[2048]; /* buffer for block size conversion */
......@@ -1021,7 +1019,6 @@ int __init my_gscd_init(void)
S_IFBLK | S_IRUGO | S_IWUGO, &gscd_fops, NULL);
blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), do_gscd_request, &gscd_lock);
blksize_size[MAJOR_NR] = gscd_blocksizes;
disk_state = 0;
gscdPresent = 1;
......
......@@ -107,9 +107,6 @@
#define mcd_port mcd /* for compatible parameter passing with "insmod" */
#include "mcd.h"
static int mcd_blocksizes[1];
/* I added A flag to drop to 1x speed if too many errors 0 = 1X ; 1 = 2X */
static int mcdDouble;
......@@ -1072,7 +1069,6 @@ int __init mcd_init(void)
return -EIO;
}
blksize_size[MAJOR_NR] = mcd_blocksizes;
blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), do_mcd_request,
&mcd_spinlock);
......
......@@ -285,7 +285,6 @@ static int mcdx_setattentuator(struct s_drive_stuff *,
/* static variables ************************************************/
static int mcdx_blocksizes[MCDX_NDRIVES];
static int mcdx_drive_map[][2] = MCDX_DRIVEMAP;
static struct s_drive_stuff *mcdx_stuffp[MCDX_NDRIVES];
static struct s_drive_stuff *mcdx_irq_map[16] = { 0, 0, 0, 0, 0, 0, 0, 0,
......@@ -1086,8 +1085,6 @@ int __init mcdx_init_drive(int drive)
int size = sizeof(*stuffp);
char msg[80];
mcdx_blocksizes[drive] = 0;
xtrace(INIT, "init() try drive %d\n", drive);
xtrace(INIT, "kmalloc space for stuffpt's\n");
......@@ -1184,7 +1181,6 @@ int __init mcdx_init_drive(int drive)
blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), do_mcdx_request,
&mcdx_lock);
blksize_size[MAJOR_NR] = mcdx_blocksizes;
xtrace(INIT, "init() subscribe irq and i/o\n");
mcdx_irq_map[stuffp->irq] = stuffp;
......
......@@ -108,8 +108,6 @@ static void debug(int debug_this, const char* fmt, ...)
#define DEBUG(x)
#endif
static int blksize = 2048;
/* Drive hardware/firmware characteristics
Identifiers in accordance with Optics Storage documentation */
......@@ -2059,9 +2057,9 @@ int __init optcd_init(void)
}
devfs_register (NULL, "optcd", DEVFS_FL_DEFAULT, MAJOR_NR, 0,
S_IFBLK | S_IRUGO | S_IWUGO, &opt_fops, NULL);
blksize_size[MAJOR_NR] = &blksize;
blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), do_optcd_request,
&optcd_lock);
blk_queue_hardsect_size(BLK_DEFAULT_QUEUE(MAJOR_NR), 2048);
request_region(optcd_port, 4, "optcd");
register_disk(NULL, mk_kdev(MAJOR_NR,0), 1, &opt_fops, 0);
......
......@@ -674,7 +674,6 @@ static int n_retries=6;
static int ndrives;
static u_char drv_pattern[NR_SBPCD]={speed_auto,speed_auto,speed_auto,speed_auto};
static int sbpcd_blocksizes[NR_SBPCD];
/*==========================================================================*/
/*
......@@ -5928,12 +5927,8 @@ int __init SBPCD_INIT(void)
{
printk(" sbpcd: Unable to register with Uniform CD-ROm driver\n");
}
/*
* set the block size
*/
sbpcd_blocksizes[j]=CD_FRAMESIZE;
}
blksize_size[MAJOR_NR]=sbpcd_blocksizes;
blk_queue_hardsect_size(BLK_DEFAULT_QUEUE(MAJOR_NR), CD_FRAMESIZE);
#ifndef MODULE
init_done:
......
......@@ -1658,8 +1658,6 @@ static struct block_device_operations sjcd_fops = {
check_media_change:sjcd_disk_change,
};
static int blksize = 2048;
/*
* Following stuff is intended for initialization of the cdrom. It
* first looks for presence of device. If the device is present, it
......@@ -1686,8 +1684,6 @@ int __init sjcd_init(void)
printk("SJCD: sjcd=0x%x: ", sjcd_base);
#endif
blksize_size[MAJOR_NR] = &blksize;
if (devfs_register_blkdev(MAJOR_NR, "sjcd", &sjcd_fops) != 0) {
printk("SJCD: Unable to get major %d for Sanyo CD-ROM\n",
MAJOR_NR);
......@@ -1695,6 +1691,7 @@ int __init sjcd_init(void)
}
blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), do_sjcd_request, &sjcd_lock);
blk_queue_hardsect_size(BLK_DEFAULT_QUEUE(MAJOR_NR), 2048);
register_disk(NULL, mk_kdev(MAJOR_NR, 0), 1, &sjcd_fops, 0);
if (check_region(sjcd_base, 4)) {
......
......@@ -1471,8 +1471,6 @@ static struct block_device_operations cdu_fops =
check_media_change: cdu535_check_media_change,
};
static int sonycd535_block_size = CDU535_BLOCK_SIZE;
/*
* Initialize the driver.
*/
......@@ -1600,8 +1598,7 @@ sony535_init(void)
blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR),
do_cdu535_request,
&sonycd535_lock);
blksize_size[MAJOR_NR] = &sonycd535_block_size;
blk_queue_hardsect_size(BLK_DEFAULT_QUEUE(MAJOR_NR), CDU535_BLOCK_SIZE);
sony_toc = (struct s535_sony_toc *)
kmalloc(sizeof *sony_toc, GFP_KERNEL);
if (sony_toc == NULL) {
......
......@@ -34,8 +34,6 @@
#include "ataraid.h"
static int ataraid_blksize_size[256];
static struct raid_device_operations* ataraid_ops[16];
static int ataraid_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg);
......@@ -236,14 +234,8 @@ static __init int ataraid_init(void)
{
int i;
for(i=0;i<256;i++)
{
ataraid_blksize_size[i] = 1024;
ataraid_readahead[i] = 1023;
}
if (blksize_size[ATAMAJOR]==NULL)
blksize_size[ATAMAJOR] = ataraid_blksize_size;
/* setup the gendisk structure */
ataraid_gendisk.part = kmalloc(256 * sizeof(struct hd_struct),GFP_KERNEL);
if (ataraid_gendisk.part==NULL) {
......@@ -281,7 +273,6 @@ static void __exit ataraid_exit(void)
{
unregister_blkdev(ATAMAJOR, "ataraid");
blk_size[ATAMAJOR] = NULL;
blksize_size[ATAMAJOR] = NULL;
del_gendisk(&ataraid_gendisk);
......
......@@ -107,8 +107,6 @@ static int NR_HD;
static struct hd_struct hd[MAX_HD<<6];
static int hd_sizes[MAX_HD<<6];
static int hd_blocksizes[MAX_HD<<6];
static struct timer_list device_timer;
......@@ -727,10 +725,6 @@ static void __init hd_geninit(void)
{
int drive;
for(drive=0; drive < (MAX_HD << 6); drive++)
hd_blocksizes[drive] = 1024;
blksize_size[MAJOR_NR] = hd_blocksizes;
blk_queue_hardsect_size(QUEUE, 512);
#ifdef __i386__
......
......@@ -2677,7 +2677,6 @@ int ide_cdrom_setup(ide_drive_t *drive)
* default to read-only always and fix latter at the bottom
*/
set_device_ro(mk_kdev(drive->channel->major, minor), 1);
set_blocksize(mk_kdev(drive->channel->major, minor), CD_FRAMESIZE);
blk_queue_hardsect_size(&drive->queue, CD_FRAMESIZE);
blk_queue_prep_rq(&drive->queue, ll_10byte_cmd_build);
......@@ -2861,13 +2860,7 @@ void ide_cdrom_revalidate (ide_drive_t *drive)
/* for general /dev/cdrom like mounting, one big disc */
drive->part[0].nr_sects = toc->capacity * SECTORS_PER_FRAME;
drive->channel->gd->sizes[minor] = toc->capacity * BLOCKS_PER_FRAME;
/*
* reset block size, ide_revalidate_disk incorrectly sets it to
* 1024 even for CDROM's
*/
blk_size[drive->channel->major] = drive->channel->gd->sizes;
set_blocksize(mk_kdev(drive->channel->major, minor), CD_FRAMESIZE);
}
static sector_t ide_cdrom_capacity(struct ata_device *drive)
......
......@@ -781,12 +781,6 @@ static void init_gendisk(struct ata_channel *hwif)
goto err_kmalloc_gd_part;
memset(gd->part, 0, minors * sizeof(struct hd_struct));
blksize_size[hwif->major] = kmalloc (minors*sizeof(int), GFP_KERNEL);
if (!blksize_size[hwif->major])
goto err_kmalloc_bs;
for (i = 0; i < minors; ++i)
blksize_size[hwif->major][i] = BLOCK_SIZE;
for (unit = 0; unit < MAX_DRIVES; ++unit)
hwif->drives[unit].part = &gd->part[unit << PARTN_BITS];
......
......@@ -2200,7 +2200,6 @@ void ide_unregister(struct ata_channel *ch)
* Remove us from the kernel's knowledge.
*/
unregister_blkdev(ch->major, ch->name);
kfree(blksize_size[ch->major]);
blk_dev[ch->major].data = NULL;
blk_dev[ch->major].queue = NULL;
blk_clear(ch->major);
......
......@@ -105,7 +105,6 @@ static ctl_table raid_root_table[] = {
* subsystems want to have a pre-defined structure
*/
struct hd_struct md_hd_struct[MAX_MD_DEVS];
static int md_blocksizes[MAX_MD_DEVS];
static int md_maxreadahead[MAX_MD_DEVS];
static mdk_thread_t *md_recovery_thread;
......@@ -1579,7 +1578,7 @@ static int device_size_calculation(mddev_t * mddev)
md_size[mdidx(mddev)] = sb->size * data_disks;
readahead = (VM_MAX_READAHEAD * 1024) / PAGE_SIZE;
ra_pages = blk_get_ra_pages(rdev->dev);
ra_pages = blk_get_ra_pages(rdev->bdev);
if (ra_pages)
readahead = (*ra_pages * PAGE_CACHE_SIZE) / PAGE_SIZE;
if (!sb->level || (sb->level == 4) || (sb->level == 5)) {
......@@ -1714,9 +1713,18 @@ static int do_md_run(mddev_t * mddev)
if (rdev->faulty)
continue;
invalidate_device(rdev->dev, 1);
#if 0
/*
* Aside of obvious breakage (code below results in block size set
* according to the sector size of last component instead of the
* maximal sector size), we have more interesting problem here.
* Namely, we actually ought to set _sector_ size for the array
* and that requires per-array request queues. Disabled for now.
*/
md_blocksizes[mdidx(mddev)] = 1024;
if (bdev_hardsect_size(rdev->bdev) > md_blocksizes[mdidx(mddev)])
md_blocksizes[mdidx(mddev)] = bdev_hardsect_size(rdev->bdev);
#endif
}
mddev->pers = pers[pnum];
......@@ -3613,11 +3621,9 @@ static void md_geninit(void)
int i;
for(i = 0; i < MAX_MD_DEVS; i++) {
md_blocksizes[i] = 1024;
md_size[i] = 0;
md_maxreadahead[i] = 32;
}
blksize_size[MAJOR_NR] = md_blocksizes;
blk_size[MAJOR_NR] = md_size;
dprintk("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));
......
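For concreteness, the readahead fallback in device_size_calculation() above works out as follows, treating 4 kB pages and the era's VM_MAX_READAHEAD of 128 (kB) as assumptions:

/* sketch of the fallback logic above, not a verbatim excerpt */
static int md_readahead_pages(mdk_rdev_t *rdev)
{
	int readahead = (VM_MAX_READAHEAD * 1024) / PAGE_SIZE;	/* 128 kB -> 32 pages */
	unsigned long *ra_pages = blk_get_ra_pages(rdev->bdev);

	if (ra_pages)	/* component queue found: honour its window instead */
		readahead = (*ra_pages * PAGE_CACHE_SIZE) / PAGE_SIZE;
	return readahead;
}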
......@@ -738,43 +738,56 @@ static int __check_consistency (mddev_t *mddev, int row)
{
multipath_conf_t *conf = mddev_to_conf(mddev);
int disks = MD_SB_DISKS;
-	kdev_t dev;
 	struct block_device *bdev;
-	struct buffer_head *bh = NULL;
 	int i, rc = 0;
-	char *buffer = NULL;
+	char *buffer;
+	struct page *page = NULL;
+	int first = 1;
+	int order = PAGE_CACHE_SHIFT-PAGE_SHIFT;
+
+	buffer = (char *) __get_free_pages(GFP_KERNEL, order);
+	if (!buffer)
+		return rc;
 
 	for (i = 0; i < disks; i++) {
+		struct address_space *mapping;
+		char *p;
+
 		if (!conf->multipaths[i].operational)
 			continue;
 		printk("(checking disk %d)\n",i);
-		dev = conf->multipaths[i].dev;
 		bdev = conf->multipaths[i].bdev;
-		set_blocksize(dev, 4096);
-		if ((bh = __bread(bdev, row / 4, 4096)) == NULL)
-			break;
-		if (!buffer) {
-			buffer = (char *) __get_free_page(GFP_KERNEL);
-			if (!buffer)
-				break;
-			memcpy(buffer, bh->b_data, 4096);
-		} else if (memcmp(buffer, bh->b_data, 4096)) {
+		mapping = bdev->bd_inode->i_mapping;
+		page = read_cache_page(mapping, row/(PAGE_CACHE_SIZE/1024),
+				(filler_t *)mapping->a_ops->readpage, NULL);
+		if (IS_ERR(page)) {
+			page = NULL;
+			break;
+		}
+		wait_on_page_locked(page);
+		if (!PageUptodate(page))
+			break;
+		if (PageError(page))
+			break;
+		p = page_address(page);
+		if (first) {
+			memcpy(buffer, p, PAGE_CACHE_SIZE);
+			first = 0;
+		} else if (memcmp(buffer, p, PAGE_CACHE_SIZE)) {
 			rc = 1;
 			break;
 		}
-		bforget(bh);
+		page_cache_release(page);
 		fsync_bdev(bdev);
 		invalidate_bdev(bdev, 0);
-		bh = NULL;
+		page = NULL;
 	}
-	if (buffer)
-		free_page((unsigned long) buffer);
-	if (bh) {
-		bdev = bh->b_bdev;
-		bforget(bh);
+	if (page) {
+		bdev = page->mapping->host->i_bdev;
+		page_cache_release(page);
 		fsync_bdev(bdev);
 		invalidate_bdev(bdev, 0);
 	}
+	free_pages((unsigned long) buffer, order);
 	return rc;
 }
......
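The rewritten __check_consistency() replaces bread()-through-the-buffer-cache with a read through the block device's page cache. The pattern it relies on, pulled out as a standalone helper (the helper name is hypothetical; the calls match the 2.5 interfaces used above):

/* read one page of a block device via its page cache; NULL on failure */
static struct page *read_bdev_page(struct block_device *bdev, pgoff_t index)
{
	struct address_space *mapping = bdev->bd_inode->i_mapping;
	struct page *page = read_cache_page(mapping, index,
			(filler_t *)mapping->a_ops->readpage, NULL);

	if (IS_ERR(page))
		return NULL;
	wait_on_page_locked(page);
	if (!PageUptodate(page) || PageError(page)) {
		page_cache_release(page);
		return NULL;
	}
	return page;
}

With this, the consistency check is just: read page row/(PAGE_CACHE_SIZE/1024) from every operational path and memcmp() each against the first copy.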
......@@ -123,7 +123,6 @@
* Some of these can be made smaller later
*/
static int i2ob_blksizes[MAX_I2OB<<4];
static int i2ob_sizes[MAX_I2OB<<4];
static int i2ob_media_change_flag[MAX_I2OB];
......@@ -1815,7 +1814,6 @@ int i2o_block_init(void)
* Now fill in the boiler plate
*/
blksize_size[MAJOR_NR] = i2ob_blksizes;
blk_size[MAJOR_NR] = i2ob_sizes;
blk_dev[MAJOR_NR].queue = i2ob_get_queue;
......@@ -1830,7 +1828,6 @@ int i2o_block_init(void)
i2ob_dev[i].head = NULL;
i2ob_dev[i].tail = NULL;
i2ob_dev[i].depth = MAX_I2OB_DEPTH;
i2ob_blksizes[i] = 1024;
}
/*
......
......@@ -1062,6 +1062,7 @@ static int __init init_blkmtd(void)
int readonly = 0;
int erase_size = CONFIG_MTD_BLKDEV_ERASESIZE;
kdev_t rdev;
struct block_device *bdev;
int err;
int mode;
int regions;
......@@ -1127,11 +1128,16 @@ static int __init init_blkmtd(void)
printk("blkmtd: attempting to use an MTD device as a block device\n");
return 1;
}
/* get the block device */
bdev = bdget(kdev_t_to_nr(mk_kdev(maj, min)));
err = blkdev_get(bdev, mode, 0, BDEV_RAW);
if (err)
return 1;
DEBUG(1, "blkmtd: devname = %s\n", __bdevname(rdev));
DEBUG(1, "blkmtd: devname = %s\n", bdevname(bdev));
blocksize = BLOCK_SIZE;
blocksize = bs ? bs : block_size(rdev);
blocksize = bs ? bs : block_size(bdev);
i = blocksize;
blocksize_bits = 0;
while(i != 1) {
......@@ -1139,27 +1145,24 @@ static int __init init_blkmtd(void)
i >>= 1;
}
size = (count ? count*blocksize : blkdev_size_in_bytes(rdev));
size = count ? count*blocksize : bdev->bd_inode->i_size;
DEBUG(1, "blkmtd: size = %ld\n", (long int)size);
if(size == 0) {
printk("blkmtd: cant determine size\n");
blkdev_put(bdev, BDEV_RAW);
return 1;
}
mtd_rawdevice = (mtd_raw_dev_data_t *)kmalloc(sizeof(mtd_raw_dev_data_t), GFP_KERNEL);
if(mtd_rawdevice == NULL) {
blkdev_put(bdev, BDEV_RAW);
err = -ENOMEM;
goto init_err;
}
memset(mtd_rawdevice, 0, sizeof(mtd_raw_dev_data_t));
/* get the block device */
mtd_rawdevice->binding = bdget(kdev_t_to_nr(mk_kdev(maj, min)));
err = blkdev_get(mtd_rawdevice->binding, mode, 0, BDEV_RAW);
if (err) {
goto init_err;
}
mtd_rawdevice->binding = bdev;
mtd_rawdevice->totalsize = size;
mtd_rawdevice->sector_size = blocksize;
mtd_rawdevice->sector_bits = blocksize_bits;
......
......@@ -177,7 +177,6 @@ static struct mtd_notifier ftl_notifier = {
static struct hd_struct ftl_hd[MINOR_NR(MAX_DEV, 0, 0)];
static int ftl_sizes[MINOR_NR(MAX_DEV, 0, 0)];
static int ftl_blocksizes[MINOR_NR(MAX_DEV, 0, 0)];
static struct gendisk ftl_gendisk = {
major: FTL_MAJOR,
......@@ -1345,13 +1344,10 @@ int init_ftl(void)
return -EAGAIN;
}
for (i = 0; i < MINOR_NR(MAX_DEV, 0, 0); i++)
ftl_blocksizes[i] = 1024;
for (i = 0; i < MAX_DEV*MAX_PART; i++) {
ftl_hd[i].nr_sects = 0;
ftl_hd[i].start_sect = 0;
}
blksize_size[FTL_MAJOR] = ftl_blocksizes;
ftl_gendisk.major = FTL_MAJOR;
blk_init_queue(BLK_DEFAULT_QUEUE(FTL_MAJOR), &do_ftl_request);
add_gendisk(&ftl_gendisk);
......
......@@ -55,8 +55,6 @@ static struct mtdblk_dev {
static spinlock_t mtdblks_lock;
static int mtd_sizes[MAX_MTD_DEVICES];
static int mtd_blksizes[MAX_MTD_DEVICES];
/*
* Cache stuff...
......@@ -346,10 +344,6 @@ static int mtdblock_open(struct inode *inode, struct file *file)
mtdblks[dev] = mtdblk;
mtd_sizes[dev] = mtdblk->mtd->size/1024;
if (mtdblk->mtd->erasesize)
mtd_blksizes[dev] = mtdblk->mtd->erasesize;
if (mtd_blksizes[dev] > PAGE_SIZE)
mtd_blksizes[dev] = PAGE_SIZE;
set_device_ro (inode->i_rdev, !(mtdblk->mtd->flags & MTD_WRITEABLE));
spin_unlock(&mtdblks_lock);
......@@ -626,13 +620,9 @@ int __init init_mtdblock(void)
#endif
/* We fill it in at open() time. */
for (i=0; i< MAX_MTD_DEVICES; i++) {
for (i=0; i< MAX_MTD_DEVICES; i++)
mtd_sizes[i] = 0;
mtd_blksizes[i] = BLOCK_SIZE;
}
init_waitqueue_head(&thr_wq);
/* Allow the block size to default to BLOCK_SIZE. */
blksize_size[MAJOR_NR] = mtd_blksizes;
blk_size[MAJOR_NR] = mtd_sizes;
blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), &mtdblock_request, &mtddev_lock);
......@@ -653,7 +643,6 @@ static void __exit cleanup_mtdblock(void)
unregister_blkdev(MAJOR_NR,DEVICE_NAME);
#endif
blk_cleanup_queue(BLK_DEFAULT_QUEUE(MAJOR_NR));
blksize_size[MAJOR_NR] = NULL;
blk_size[MAJOR_NR] = NULL;
}
......
......@@ -267,7 +267,6 @@ int __init init_mtdblock(void)
}
/* Allow the block size to default to BLOCK_SIZE. */
blksize_size[MAJOR_NR] = NULL;
blk_size[MAJOR_NR] = mtd_sizes;
blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), &mtdblock_request);
......@@ -277,7 +276,6 @@ int __init init_mtdblock(void)
static void __exit cleanup_mtdblock(void)
{
unregister_blkdev(MAJOR_NR,DEVICE_NAME);
blksize_size[MAJOR_NR] = NULL;
blk_cleanup_queue(BLK_DEFAULT_QUEUE(MAJOR_NR));
}
......
......@@ -53,7 +53,6 @@
*/
static int nftl_sizes[256];
static int nftl_blocksizes[256];
/* .. for the Linux partition table handling. */
struct hd_struct part_table[256];
......@@ -146,7 +145,6 @@ static void NFTL_setup(struct mtd_info *mtd)
NFTLs[firstfree] = nftl;
/* Finally, set up the block device sizes */
nftl_sizes[firstfree * 16] = nftl->nr_sects;
//nftl_blocksizes[firstfree*16] = 512;
part_table[firstfree * 16].nr_sects = nftl->nr_sects;
nftl_gendisk.nr_real++;
......@@ -1027,13 +1025,6 @@ int __init init_nftl(void)
return -EBUSY;
} else {
blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), &nftl_request);
/* set block size to 1kB each */
for (i = 0; i < 256; i++) {
nftl_blocksizes[i] = 1024;
}
blksize_size[MAJOR_NR] = nftl_blocksizes;
add_gendisk(&nftl_gendisk);
}
......
......@@ -138,6 +138,18 @@ CONFIG_LITELINK_DONGLE
used by IrTTY. To activate support for Parallax dongles you will
have to start irattach like this "irattach -d litelink".
Microchip MCP2120 dongle
CONFIG_MCP2120_DONGLE
Say Y here if you want to build support for the Microchip MCP2120
dongle. If you want to compile it as a module, say M here and read
<file:Documentation/modules.txt>. The MCP2120 dongle attaches to
the normal 9-pin serial port connector, and can currently only be
used by IrTTY. To activate support for MCP2120 dongles you will
have to insert "irattach -d mcp2120" in the /etc/irda/drivers script.
You must build this dongle yourself. For more information see:
<http://www.eyetap.org/~tangf/irda_sir_linux.html>
CONFIG_OLD_BELKIN_DONGLE
Say Y here if you want to build support for the Adaptec Airport 1000
and 2000 dongles. If you want to compile it as a module, say M here
......
......@@ -13,6 +13,7 @@ if [ "$CONFIG_DONGLE" != "n" ]; then
dep_tristate ' Tekram IrMate 210B dongle' CONFIG_TEKRAM_DONGLE $CONFIG_IRDA
dep_tristate ' Greenwich GIrBIL dongle' CONFIG_GIRBIL_DONGLE $CONFIG_IRDA
dep_tristate ' Parallax LiteLink dongle' CONFIG_LITELINK_DONGLE $CONFIG_IRDA
dep_tristate ' Microchip MCP2120' CONFIG_MCP2120_DONGLE $CONFIG_IRDA
dep_tristate ' Old Belkin dongle' CONFIG_OLD_BELKIN_DONGLE $CONFIG_IRDA
if [ "$CONFIG_ARCH_EP7211" = "y" ]; then
dep_tristate ' EP7211 I/R support' CONFIG_EP7211_IR $CONFIG_IRDA
......
......@@ -28,5 +28,6 @@ obj-$(CONFIG_GIRBIL_DONGLE) += girbil.o
obj-$(CONFIG_LITELINK_DONGLE) += litelink.o
obj-$(CONFIG_OLD_BELKIN_DONGLE) += old_belkin.o
obj-$(CONFIG_EP7211_IR) += ep7211_ir.o
obj-$(CONFIG_MCP2120_DONGLE) += mcp2120.o
include $(TOPDIR)/Rules.make
......@@ -970,9 +970,14 @@ static int irtty_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
IRDA_DEBUG(3, __FUNCTION__ "(), %s, (cmd=0x%X)\n", dev->name, cmd);
/* Disable interrupts & save flags */
save_flags(flags);
cli();
/* Locking :
* irda_device_dongle_init() can't be locked.
* irda_task_execute() doesn't need to be locked (but
* irtty_change_speed() should protect itself).
* As this driver doesn't have spinlock protection, keep
* old fashion locking :-(
* Jean II
*/
switch (cmd) {
case SIOCSBANDWIDTH: /* Set bandwidth */
......@@ -998,14 +1003,17 @@ static int irtty_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
dongle->write = irtty_raw_write;
dongle->set_dtr_rts = irtty_set_dtr_rts;
self->dongle = dongle;
/* Now initialize the dongle! */
/* Now initialize the dongle!
* Safe to do unlocked : self->dongle is still NULL. */
dongle->issue->open(dongle, &self->qos);
/* Reset dongle */
irda_task_execute(dongle, dongle->issue->reset, NULL, NULL,
NULL);
/* Make dongle available to driver only now to avoid
* race conditions - Jean II */
self->dongle = dongle;
break;
case SIOCSMEDIABUSY: /* Set media busy */
if (!capable(CAP_NET_ADMIN))
......@@ -1019,21 +1027,27 @@ static int irtty_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
case SIOCSDTRRTS:
if (!capable(CAP_NET_ADMIN))
ret = -EPERM;
else
else {
save_flags(flags);
cli();
irtty_set_dtr_rts(dev, irq->ifr_dtr, irq->ifr_rts);
restore_flags(flags);
}
break;
case SIOCSMODE:
if (!capable(CAP_NET_ADMIN))
ret = -EPERM;
else
else {
save_flags(flags);
cli();
irtty_set_mode(dev, irq->ifr_mode);
restore_flags(flags);
}
break;
default:
ret = -EOPNOTSUPP;
}
restore_flags(flags);
return ret;
}
......
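The net effect of the irtty ioctl change is that interrupts are now disabled only around the two small hardware pokes rather than across the whole handler, where SIOCSDONGLE can sleep in irda_device_dongle_init(). The scoped pattern, as a hedged sketch (the wrapper name is invented for illustration):

/* 2.4-era locking, per the comment above: cli() only around the poke */
static void irtty_dtr_rts_atomic(struct net_device *dev, int dtr, int rts)
{
	unsigned long flags;

	save_flags(flags);
	cli();
	irtty_set_dtr_rts(dev, dtr, rts);
	restore_flags(flags);
}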
/*********************************************************************
*
*
* Filename: mcp2120.c
* Version: 1.0
* Description: Implementation for the MCP2120 (Microchip)
* Status: Experimental.
* Author: Felix Tang (tangf@eyetap.org)
* Created at: Sun Mar 31 19:32:12 EST 2002
* Based on code by: Dag Brattli <dagb@cs.uit.no>
*
* Copyright (c) 2002 Felix Tang, All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*
********************************************************************/
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/tty.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <net/irda/irda.h>
#include <net/irda/irmod.h>
#include <net/irda/irda_device.h>
#include <net/irda/irtty.h>
static int mcp2120_reset(struct irda_task *task);
static void mcp2120_open(dongle_t *self, struct qos_info *qos);
static void mcp2120_close(dongle_t *self);
static int mcp2120_change_speed(struct irda_task *task);
#define MCP2120_9600 0x87
#define MCP2120_19200 0x8B
#define MCP2120_38400 0x85
#define MCP2120_57600 0x83
#define MCP2120_115200 0x81
#define MCP2120_COMMIT 0x11
static struct dongle_reg dongle = {
Q_NULL,
IRDA_MCP2120_DONGLE,
mcp2120_open,
mcp2120_close,
mcp2120_reset,
mcp2120_change_speed,
};
int __init mcp2120_init(void)
{
return irda_device_register_dongle(&dongle);
}
void mcp2120_cleanup(void)
{
irda_device_unregister_dongle(&dongle);
}
static void mcp2120_open(dongle_t *self, struct qos_info *qos)
{
qos->baud_rate.bits &= IR_9600|IR_19200|IR_38400|IR_57600|IR_115200;
qos->min_turn_time.bits = 0x01;
MOD_INC_USE_COUNT;
}
static void mcp2120_close(dongle_t *self)
{
/* Power off dongle */
/* reset and inhibit mcp2120 */
self->set_dtr_rts(self->dev, TRUE, TRUE);
//self->set_dtr_rts(self->dev, FALSE, FALSE);
MOD_DEC_USE_COUNT;
}
/*
* Function mcp2120_change_speed (dev, speed)
*
* Set the speed for the MCP2120.
*
*/
static int mcp2120_change_speed(struct irda_task *task)
{
dongle_t *self = (dongle_t *) task->instance;
__u32 speed = (__u32) task->param;
__u8 control[2];
int ret = 0;
self->speed_task = task;
switch (task->state) {
case IRDA_TASK_INIT:
/* Need to reset the dongle and go to 9600 bps before
programming */
//printk("Dmcp2120_change_speed irda_task_init\n");
if (irda_task_execute(self, mcp2120_reset, NULL, task,
(void *) speed))
{
/* Dongle need more time to reset */
irda_task_next_state(task, IRDA_TASK_CHILD_WAIT);
/* Give reset 1 sec to finish */
ret = MSECS_TO_JIFFIES(1000);
}
break;
case IRDA_TASK_CHILD_WAIT:
WARNING(__FUNCTION__ "(), resetting dongle timed out!\n");
ret = -1;
break;
case IRDA_TASK_CHILD_DONE:
/* Set DTR to enter command mode */
self->set_dtr_rts(self->dev, TRUE, FALSE);
udelay(500);
switch (speed) {
case 9600:
default:
control[0] = MCP2120_9600;
//printk("mcp2120 9600\n");
break;
case 19200:
control[0] = MCP2120_19200;
//printk("mcp2120 19200\n");
break;
case 38400:
control[0] = MCP2120_38400;
//printk("mcp2120 38400\n");
break;
case 57600:
control[0] = MCP2120_57600;
//printk("mcp2120 57600\n");
break;
case 115200:
control[0] = MCP2120_115200;
//printk("mcp2120 115200\n");
break;
}
control[1] = MCP2120_COMMIT;
/* Write control bytes */
self->write(self->dev, control, 2);
irda_task_next_state(task, IRDA_TASK_WAIT);
ret = MSECS_TO_JIFFIES(100);
//printk("mcp2120_change_speed irda_child_done\n");
break;
case IRDA_TASK_WAIT:
/* Go back to normal mode */
self->set_dtr_rts(self->dev, FALSE, FALSE);
irda_task_next_state(task, IRDA_TASK_DONE);
self->speed_task = NULL;
//printk("mcp2120_change_speed irda_task_wait\n");
break;
default:
ERROR(__FUNCTION__ "(), unknown state %d\n", task->state);
irda_task_next_state(task, IRDA_TASK_DONE);
self->speed_task = NULL;
ret = -1;
break;
}
return ret;
}
/*
* Function mcp2120_reset (driver)
*
* This function resets the mcp2120 dongle.
*
* Info: -set RTS to reset mcp2120
* -set DTR to set mcp2120 software command mode
* -mcp2120 defaults to 9600 baud after reset
*
* Algorithm:
* 0. Set RTS to reset mcp2120.
* 1. Clear RTS and wait for device reset timer of 30 ms (max).
*
*/
static int mcp2120_reset(struct irda_task *task)
{
dongle_t *self = (dongle_t *) task->instance;
int ret = 0;
self->reset_task = task;
switch (task->state) {
case IRDA_TASK_INIT:
//printk("mcp2120_reset irda_task_init\n");
/* Reset dongle by setting RTS*/
self->set_dtr_rts(self->dev, TRUE, TRUE);
irda_task_next_state(task, IRDA_TASK_WAIT1);
ret = MSECS_TO_JIFFIES(50);
break;
case IRDA_TASK_WAIT1:
//printk("mcp2120_reset irda_task_wait1\n");
/* clear RTS and wait for at least 30 ms. */
self->set_dtr_rts(self->dev, FALSE, FALSE);
irda_task_next_state(task, IRDA_TASK_WAIT2);
ret = MSECS_TO_JIFFIES(50);
break;
case IRDA_TASK_WAIT2:
//printk("mcp2120_reset irda_task_wait2\n");
/* Go back to normal mode */
self->set_dtr_rts(self->dev, FALSE, FALSE);
irda_task_next_state(task, IRDA_TASK_DONE);
self->reset_task = NULL;
break;
default:
ERROR(__FUNCTION__ "(), unknown state %d\n", task->state);
irda_task_next_state(task, IRDA_TASK_DONE);
self->reset_task = NULL;
ret = -1;
break;
}
return ret;
}
#ifdef MODULE
MODULE_AUTHOR("Felix Tang <tangf@eyetap.org>");
MODULE_DESCRIPTION("Microchip MCP2120");
MODULE_LICENSE("GPL");
/*
* Function init_module (void)
*
* Initialize MCP2120 module
*
*/
int init_module(void)
{
return mcp2120_init();
}
/*
* Function cleanup_module (void)
*
* Cleanup MCP2120 module
*
*/
void cleanup_module(void)
{
mcp2120_cleanup();
}
#endif /* MODULE */
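The speed switch in mcp2120_change_speed() could equally be table-driven; a purely illustrative sketch using the same control bytes (not part of the driver):

static const struct {
	__u32 baud;
	__u8  ctl;
} mcp2120_speed_map[] = {
	{   9600, MCP2120_9600   },	/* also the post-reset default rate */
	{  19200, MCP2120_19200  },
	{  38400, MCP2120_38400  },
	{  57600, MCP2120_57600  },
	{ 115200, MCP2120_115200 },
};

/* look up the control byte, falling back to 9600 like the switch does */
static __u8 mcp2120_ctl_for(__u32 baud)
{
	int i;

	for (i = 0; i < 5; i++)
		if (mcp2120_speed_map[i].baud == baud)
			return mcp2120_speed_map[i].ctl;
	return MCP2120_9600;
}

Either form ends the same way: the control byte plus MCP2120_COMMIT are written while DTR holds the dongle in command mode.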
......@@ -716,13 +716,6 @@ dasd_register_major (major_info_t * major_info)
goto out_blk_size;
memset (blk_size[major], 0, (1 << MINORBITS) * sizeof (int));
/* init blksize_size */
blksize_size[major] =
(int *) kmalloc ((1 << MINORBITS) * sizeof (int), GFP_ATOMIC);
if (!blksize_size[major])
goto out_blksize_size;
memset (blksize_size[major], 0, (1 << MINORBITS) * sizeof (int));
/* finally do the gendisk stuff */
major_info->gendisk.part = kmalloc ((1 << MINORBITS) *
sizeof (struct hd_struct),
......@@ -741,10 +734,6 @@ dasd_register_major (major_info_t * major_info)
/* error handling - free the prior allocated memory */
out_gendisk:
kfree (blksize_size[major]);
blksize_size[major] = NULL;
out_blksize_size:
kfree (blk_size[major]);
blk_size[major] = NULL;
......@@ -801,7 +790,6 @@ dasd_unregister_major (major_info_t * major_info)
kfree (major_info->gendisk.part);
kfree (blk_size[major]);
kfree (blksize_size[major]);
blk_clear(major);
......@@ -3339,7 +3327,6 @@ dasd_setup_blkdev (dasd_device_t *device )
sizes.s2b_shift) >> 1;
else
device->major_info->gendisk.sizes[minor + i] = 0;
blksize_size[major][minor + i] = device->sizes.bp_block;
blk_queue_max_sectors(device->request_queue,
device->discipline->max_blocks << device->sizes.s2b_shift);
device->major_info->gendisk.part[minor+i].start_sect = 0;
......@@ -3369,7 +3356,6 @@ dasd_disable_blkdev (dasd_device_t *device )
for (i = 0; i < (1 << DASD_PARTN_BITS); i++) {
destroy_buffers(MKDEV(major,minor+i));
device->major_info->gendisk.sizes[minor + i] = 0;
blksize_size[major][minor + i] = 0;
}
if (device->request_queue) {
blk_cleanup_queue (device->request_queue);
......
......@@ -189,7 +189,6 @@ MODULE_PARM_DESC(sizes, "list of device (partition) sizes " \
/* The following items are obtained through kmalloc() in init_module() */
Xpram_Dev *xpram_devices = NULL;
int *xpram_blksizes = NULL;
int *xpram_offsets = NULL; /* partition offsets */
#define MIN(x,y) ((x) < (y) ? (x) : (y))
......@@ -1062,16 +1061,6 @@ int xpram_init(void)
PRINT_DEBUG(" device(%d) offset = %d kB, size = %d kB\n",i, xpram_offsets[i], xpram_sizes[i]);
#endif
xpram_blksizes = kmalloc(xpram_devs * sizeof(int), GFP_KERNEL);
if (!xpram_blksizes) {
PRINT_ERR("Not enough memory for xpram_blksizes\n");
PRINT_ERR("Giving up xpram\n");
goto fail_malloc_blksizes;
}
for (i=0; i < xpram_devs; i++) /* all the same blocksize */
xpram_blksizes[i] = xpram_blksize;
blksize_size[major]=xpram_blksizes;
/*
* allocate the devices -- we can't have them static, as the number
* can be specified at load time
......@@ -1142,10 +1131,7 @@ int xpram_init(void)
}
kfree(xpram_devices);
#endif /* V24 */
fail_malloc_blksizes:
kfree (xpram_offsets);
kfree (xpram_blksizes);
blksize_size[major] = NULL;
fail_malloc_devices:
fail_malloc:
#if (XPRAM_VERSION == 22)
......@@ -1183,7 +1169,6 @@ void cleanup_module(void)
#if (XPRAM_VERSION == 22)
blk_dev[major].request_fn = NULL;
#endif /* V22 */
kfree(blksize_size[major]);
kfree(xpram_offsets);
blk_clear(major);
......
......@@ -1344,11 +1344,13 @@ ccw_req_t * tape34xx_bread (struct request *req,tape_info_t* ti,int tapeblock_ma
ccw1_t *ccw;
__u8 *data;
kdev_t dev = mk_kdev(tapeblock_major, ti->blk_minor);
unsigned bsize = block_size(dev);
struct block_device *bdev = bdget(kdev_t_to_nr(dev));
unsigned bsize = block_size(bdev);
int s2b = bsize/queue_hardsect_size(&ti->request_queue);
int realcount;
int size,bhct = 0;
struct buffer_head* bh;
bdput(bdev);
for (bh = req->bh; bh; bh = bh->b_reqnext) {
if (bh->b_size > bsize)
for (size = 0; size < bh->b_size; size += bsize)
......
......@@ -74,8 +74,8 @@ tapeblock_rmdevfstree (tape_info_t* ti) {
void
tapeblock_setup(tape_info_t* ti) {
blk_size[tapeblock_major][ti->blk_minor]=0; // this will be detected
blksize_size[tapeblock_major][ti->blk_minor]=2048; // blocks are 2k by default.
blk_init_queue (&ti->request_queue, tape_request_fn);
blk_queue_hardsect_size(&ti->request_queue, 2048);
#ifdef CONFIG_DEVFS_FS
tapeblock_mkdevfstree(ti);
#endif
......@@ -103,8 +103,6 @@ tapeblock_init(void) {
PRINT_WARN(KERN_ERR " tape gets major %d for block device\n", result);
blk_size[tapeblock_major] = (int*) kmalloc (256*sizeof(int),GFP_ATOMIC);
memset(blk_size[tapeblock_major],0,256*sizeof(int));
blksize_size[tapeblock_major] = (int*) kmalloc (256*sizeof(int),GFP_ATOMIC);
memset(blksize_size[tapeblock_major],0,256*sizeof(int));
max_sectors[tapeblock_major] = (int*) kmalloc (256*sizeof(int),GFP_ATOMIC);
memset(max_sectors[tapeblock_major],0,256*sizeof(int));
blkfront = kmalloc(sizeof(tape_frontend_t),GFP_KERNEL);
......@@ -585,6 +583,6 @@ int tapeblock_mediumdetect(tape_info_t* ti) {
s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags);
losize=(hisize+losize)/2+1;
}
blk_size[tapeblock_major][ti->blk_minor]=(losize)*(blksize_size[tapeblock_major][ti->blk_minor]/1024);
blk_size[tapeblock_major][ti->blk_minor]=losize*2;
return 0;
}
......@@ -135,7 +135,6 @@ struct jsflash {
/*
*/
static int jsfd_blksizes[JSF_MAX];
static int jsfd_sizes[JSF_MAX];
static u64 jsfd_bytesizes[JSF_MAX];
......@@ -665,7 +664,6 @@ int jsfd_init(void) {
return -EIO;
}
blksize_size[JSFD_MAJOR] = jsfd_blksizes;
blk_size[JSFD_MAJOR] = jsfd_sizes;
blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST);
......@@ -676,7 +674,6 @@ int jsfd_init(void) {
jdp->refcnt = 0;
jsfd_blksizes[i] = 1024;
jsfd_bytesizes[i] = jdp->dsize;
jsfd_sizes[i] = jsfd_bytesizes[i] >> 10;
register_disk(NULL, MKDEV(JSFD_MAJOR, i), 1, &jsfd_fops,
......
......@@ -1013,13 +1013,7 @@ int scsi_decide_disposition(Scsi_Cmnd * SCpnt)
SCpnt->flags &= ~IS_RESETTING;
goto maybe_retry;
}
/*
* Examine the sense data to figure out how to proceed from here.
* If there is no sense data, we will be forced into the error
* handler thread, where we get to examine the thing in a lot more
* detail.
*/
return scsi_check_sense(SCpnt);
return SUCCESS;
default:
return FAILED;
}
......@@ -2002,6 +1996,7 @@ scsi_new_reset(Scsi_Cmnd *SCpnt, int flag)
return rtn;
}
/*
* Overrides for Emacs so that we follow Linus's tabbing style.
* Emacs will notice this stuff at the end of the file and automatically
......
......@@ -41,7 +41,7 @@ unsigned char *scsi_bios_ptable(kdev_t dev)
err = blkdev_get(bdev, FMODE_READ, 0, BDEV_FILE);
if (err)
goto fail;
bh = __bread(bdev, 0, block_size(rdev));
bh = __bread(bdev, 0, block_size(bdev));
if (!bh)
goto fail2;
memcpy(res, bh->b_data + 0x1be, 66);
......
......@@ -89,8 +89,6 @@ static struct Scsi_Device_Template sr_template =
Scsi_CD *scsi_CDs;
static int *sr_sizes;
static int *sr_blocksizes;
static int sr_open(struct cdrom_device_info *, int);
void get_sectorsize(int);
void get_capabilities(int);
......@@ -724,18 +722,6 @@ static int sr_init()
if (!sr_sizes)
goto cleanup_cds;
memset(sr_sizes, 0, sr_template.dev_max * sizeof(int));
sr_blocksizes = kmalloc(sr_template.dev_max * sizeof(int), GFP_ATOMIC);
if (!sr_blocksizes)
goto cleanup_sizes;
/*
* These are good guesses for the time being.
*/
for (i = 0; i < sr_template.dev_max; i++)
sr_blocksizes[i] = 2048;
blksize_size[MAJOR_NR] = sr_blocksizes;
return 0;
cleanup_sizes:
kfree(sr_sizes);
......@@ -845,9 +831,6 @@ static void __exit exit_sr(void)
kfree(sr_sizes);
sr_sizes = NULL;
kfree(sr_blocksizes);
sr_blocksizes = NULL;
}
blk_clear(MAJOR_NR);
......
......@@ -25,13 +25,13 @@
#define MAX_BUF_PER_PAGE (PAGE_CACHE_SIZE / 512)
static unsigned long max_block(kdev_t dev)
static unsigned long max_block(struct block_device *bdev)
{
unsigned int retval = ~0U;
loff_t sz = blkdev_size_in_bytes(dev);
loff_t sz = bdev->bd_inode->i_size;
if (sz) {
unsigned int size = block_size(dev);
unsigned int size = block_size(bdev);
unsigned int sizebits = blksize_bits(size);
retval = (sz >> sizebits);
}
......@@ -53,10 +53,10 @@ static void kill_bdev(struct block_device *bdev)
truncate_inode_pages(bdev->bd_inode->i_mapping, 0);
}
-int set_blocksize(kdev_t dev, int size)
+int set_blocksize(struct block_device *bdev, int size)
 {
 	int oldsize;
-	struct block_device *bdev;
+	kdev_t dev = to_kdev_t(bdev->bd_dev);
 
 	/* Size must be a power of two, and between 512 and PAGE_SIZE */
 	if (size > PAGE_SIZE || size < 512 || (size & (size-1)))
......@@ -66,36 +66,22 @@ int set_blocksize(kdev_t dev, int size)
 	if (size < get_hardsect_size(dev))
 		return -EINVAL;
 
-	/* No blocksize array? Implies hardcoded BLOCK_SIZE */
-	if (!blksize_size[major(dev)]) {
-		if (size == BLOCK_SIZE)
-			return 0;
-		return -EINVAL;
-	}
-
-	oldsize = blksize_size[major(dev)][minor(dev)];
+	oldsize = bdev->bd_block_size;
 	if (oldsize == size)
 		return 0;
 
-	if (!oldsize && size == BLOCK_SIZE) {
-		blksize_size[major(dev)][minor(dev)] = size;
-		return 0;
-	}
-
 	/* Ok, we're actually changing the blocksize.. */
-	bdev = bdget(kdev_t_to_nr(dev));
 	sync_blockdev(bdev);
-	blksize_size[major(dev)][minor(dev)] = size;
+	bdev->bd_block_size = size;
 	bdev->bd_inode->i_blkbits = blksize_bits(size);
 	kill_bdev(bdev);
-	bdput(bdev);
 	return 0;
 }
int sb_set_blocksize(struct super_block *sb, int size)
{
int bits;
if (set_blocksize(sb->s_dev, size) < 0)
if (set_blocksize(sb->s_bdev, size) < 0)
return 0;
sb->s_blocksize = size;
for (bits = 9, size >>= 9; size >>= 1; bits++)
......@@ -114,7 +100,7 @@ int sb_min_blocksize(struct super_block *sb, int size)
static int blkdev_get_block(struct inode * inode, sector_t iblock, struct buffer_head * bh, int create)
{
if (iblock >= max_block(inode->i_rdev))
if (iblock >= max_block(inode->i_bdev))
return -EIO;
bh->b_bdev = inode->i_bdev;
......@@ -333,7 +319,6 @@ struct block_device *bdget(dev_t dev)
struct inode *inode = new_inode(bd_mnt->mnt_sb);
if (inode) {
kdev_t kdev = to_kdev_t(dev);
unsigned long *ra_pages;
atomic_set(&new_bdev->bd_count,1);
new_bdev->bd_dev = dev;
......@@ -346,10 +331,7 @@ struct block_device *bdget(dev_t dev)
inode->i_bdev = new_bdev;
inode->i_data.a_ops = &def_blk_aops;
inode->i_data.gfp_mask = GFP_USER;
ra_pages = blk_get_ra_pages(kdev);
if (ra_pages == NULL)
ra_pages = &default_ra_pages;
inode->i_data.ra_pages = ra_pages;
inode->i_data.ra_pages = &default_ra_pages;
spin_lock(&bdev_lock);
bdev = bdfind(dev, head);
if (!bdev) {
......@@ -612,14 +594,29 @@ static int do_open(struct block_device *bdev, struct inode *inode, struct file *
}
}
}
+	if (bdev->bd_inode->i_data.ra_pages == &default_ra_pages) {
+		unsigned long *ra_pages = blk_get_ra_pages(bdev);
+		if (ra_pages == NULL)
+			ra_pages = &default_ra_pages;
+		inode->i_data.ra_pages = ra_pages;
+	}
 	if (bdev->bd_op->open) {
 		ret = bdev->bd_op->open(inode, file);
 		if (ret)
 			goto out2;
 	}
-	bdev->bd_openers++;
 	bdev->bd_inode->i_size = blkdev_size(dev);
-	bdev->bd_inode->i_blkbits = blksize_bits(block_size(dev));
+	if (!bdev->bd_openers) {
+		unsigned bsize = bdev_hardsect_size(bdev);
+		while (bsize < PAGE_CACHE_SIZE) {
+			if (bdev->bd_inode->i_size & bsize)
+				break;
+			bsize <<= 1;
+		}
+		bdev->bd_block_size = bsize;
+		bdev->bd_inode->i_blkbits = blksize_bits(bsize);
+	}
+	bdev->bd_openers++;
unlock_kernel();
up(&bdev->bd_sem);
return 0;
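The first-open probe above picks the largest power-of-two block size, up to PAGE_CACHE_SIZE, that evenly divides the device size, starting from the hard sector size. The same arithmetic as a standalone sketch (helper name hypothetical):

/* Hypothetical helper mirroring the probe in do_open(): if the
 * bsize bit is set in size, size is not a multiple of 2*bsize,
 * so stop doubling.  E.g. size == 3584 (7 sectors) -> 512;
 * size == 8192 -> PAGE_CACHE_SIZE on 4 KiB-page machines. */
static unsigned probe_block_size(loff_t size, unsigned hardsect)
{
	unsigned bsize = hardsect;

	while (bsize < PAGE_CACHE_SIZE) {
		if (size & bsize)
			break;
		bsize <<= 1;
	}
	return bsize;
}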
......@@ -627,6 +624,7 @@ static int do_open(struct block_device *bdev, struct inode *inode, struct file *
out2:
if (!bdev->bd_openers) {
bdev->bd_op = NULL;
bdev->bd_inode->i_data.ra_pages = &default_ra_pages;
if (bdev != bdev->bd_contains) {
blkdev_put(bdev->bd_contains, BDEV_RAW);
bdev->bd_contains = NULL;
......@@ -700,6 +698,7 @@ int blkdev_put(struct block_device *bdev, int kind)
__MOD_DEC_USE_COUNT(bdev->bd_op->owner);
if (!bdev->bd_openers) {
bdev->bd_op = NULL;
bdev->bd_inode->i_data.ra_pages = &default_ra_pages;
if (bdev != bdev->bd_contains) {
blkdev_put(bdev->bd_contains, BDEV_RAW);
bdev->bd_contains = NULL;
......
......@@ -439,21 +439,6 @@ __get_hash_table(struct block_device *bdev, sector_t block, int unused)
return ret;
}
struct buffer_head *get_hash_table(kdev_t dev, sector_t block, int size)
{
struct block_device *bdev;
struct buffer_head *bh;
bdev = bdget(kdev_t_to_nr(dev));
if (!bdev) {
printk("No block device for %s\n", __bdevname(dev));
BUG();
}
bh = __get_hash_table(bdev, block, size);
atomic_dec(&bdev->bd_count);
return bh;
}
EXPORT_SYMBOL(get_hash_table);
void buffer_insert_list(spinlock_t *lock,
struct buffer_head *bh, struct list_head *list)
{
......@@ -988,21 +973,6 @@ __getblk(struct block_device *bdev, sector_t block, int size)
}
}
struct buffer_head *getblk(kdev_t dev, sector_t block, int size)
{
struct block_device *bdev;
struct buffer_head *bh;
bdev = bdget(kdev_t_to_nr(dev));
if (!bdev) {
printk("No block device for %s\n", __bdevname(dev));
BUG();
}
bh = __getblk(bdev, block, size);
atomic_dec(&bdev->bd_count);
return bh;
}
EXPORT_SYMBOL(getblk);
/*
* The relationship between dirty buffers and dirty pages:
*
......@@ -1071,7 +1041,7 @@ void __bforget(struct buffer_head * buf)
}
/**
* bread() - reads a specified block and returns the bh
* __bread() - reads a specified block and returns the bh
* @block: number of block
* @size: size (in bytes) to read
*
......@@ -1102,21 +1072,6 @@ struct buffer_head * __bread(struct block_device *bdev, int block, int size)
return NULL;
}
struct buffer_head *bread(kdev_t dev, int block, int size)
{
struct block_device *bdev;
struct buffer_head *bh;
bdev = bdget(kdev_t_to_nr(dev));
if (!bdev) {
printk("No block device for %s\n", __bdevname(dev));
BUG();
}
bh = __bread(bdev, block, size);
atomic_dec(&bdev->bd_count);
return bh;
}
EXPORT_SYMBOL(bread);
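With the three kdev_t wrappers gone, a caller that still has only a device number open-codes the same bdget()/__bread()/bd_count dance itself. A hedged sketch of that migration (helper name hypothetical, body mirrors the removed wrapper):

/* Hypothetical stand-in for the removed bread() wrapper. */
static struct buffer_head *bread_by_dev(kdev_t dev, int block, int size)
{
	struct block_device *bdev = bdget(kdev_t_to_nr(dev));
	struct buffer_head *bh;

	if (!bdev)
		return NULL;
	bh = __bread(bdev, block, size);
	atomic_dec(&bdev->bd_count);
	return bh;
}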
void set_bh_page(struct buffer_head *bh,
struct page *page, unsigned long offset)
{
......@@ -1231,6 +1186,7 @@ int block_flushpage(struct page *page, unsigned long offset)
return 1;
}
EXPORT_SYMBOL(block_flushpage);
/*
* We attach and possibly dirty the buffers atomically wrt
......
......@@ -1330,7 +1330,7 @@ static journal_t *ext3_get_dev_journal(struct super_block *sb,
sb_block = EXT3_MIN_BLOCK_SIZE / blocksize;
offset = EXT3_MIN_BLOCK_SIZE % blocksize;
set_blocksize(j_dev, blocksize);
set_blocksize(bdev, blocksize);
if (!(bh = __bread(bdev, sb_block, blocksize))) {
printk(KERN_ERR "EXT3-fs: couldn't read superblock of "
"external journal\n");
......
......@@ -324,6 +324,12 @@ static inline int exec_permission_lite(struct inode *inode)
if (mode & MAY_EXEC)
return 0;
if ((inode->i_mode & S_IXUGO) && capable(CAP_DAC_OVERRIDE))
return 0;
if (S_ISDIR(inode->i_mode) && capable(CAP_DAC_READ_SEARCH))
return 0;
return -EACCES;
}
......
......@@ -253,7 +253,16 @@ static void check_partition(struct gendisk *hd, kdev_t dev, int first_part_minor
bdev = bdget(kdev_t_to_nr(dev));
bdev->bd_contains = bdev;
bdev->bd_inode->i_size = (loff_t)hd->part[minor(dev)].nr_sects << 9;
bdev->bd_inode->i_blkbits = blksize_bits(block_size(dev));
if (!bdev->bd_openers) {
unsigned bsize = bdev_hardsect_size(bdev);
while (bsize < PAGE_CACHE_SIZE) {
if (bdev->bd_inode->i_size & bsize)
break;
bsize <<= 1;
}
bdev->bd_block_size = bsize;
bdev->bd_inode->i_blkbits = blksize_bits(bsize);
}
for (i = 0; check_part[i]; i++) {
int res;
res = check_part[i](hd, bdev, first_sector, first_part_minor);
......@@ -477,12 +486,6 @@ int wipe_partitions(kdev_t dev)
g->part[minor].start_sect = 0;
g->part[minor].nr_sects = 0;
}
/* some places do blksize_size[major][minor] = 1024,
as preparation for reading partition table - superfluous */
/* sd.c used to set blksize_size to 2048 in case
rscsi_disks[target].device->sector_size == 2048 */
return 0;
}
......
......@@ -713,21 +713,20 @@ efi_partition(struct gendisk *hd, struct block_device *bdev,
unsigned long first_sector, int first_part_minor)
{
kdev_t dev = to_kdev_t(bdev->bd_dev);
int hardblocksize = bdev_hardsect_size(bdev);
int orig_blksize_size = block_size(dev);
int orig_blksize_size = block_size(bdev);
int rc = 0;
/* Need to change the block size that the block layer uses */
if (orig_blksize_size != hardblocksize)
set_blocksize(dev, hardblocksize);
set_blocksize(bdev, hardblocksize);
rc = add_gpt_partitions(hd, bdev, first_part_minor);
/* change back */
if (orig_blksize_size != hardblocksize)
set_blocksize(dev, orig_blksize_size);
set_blocksize(bdev, orig_blksize_size);
return rc;
}
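The switch-read-restore dance in efi_partition() generalises to any metadata laid out in hard sectors. A minimal sketch of the pattern, assuming a caller-supplied read function (wrapper name and read_fn are hypothetical):

/* Hypothetical wrapper: run read_fn with the blocksize pinned to the
 * hard sector size, then restore whatever was there before. */
static int with_hardsect_blocksize(struct block_device *bdev,
				   int (*read_fn)(struct block_device *))
{
	int hard = bdev_hardsect_size(bdev);
	int orig = block_size(bdev);
	int rc;

	if (orig != hard)
		set_blocksize(bdev, hard);
	rc = read_fn(bdev);
	if (orig != hard)
		set_blocksize(bdev, orig);
	return rc;
}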
......
......@@ -516,7 +516,7 @@ struct super_block *get_sb_bdev(struct file_system_type *fs_type,
} else {
s->s_flags = flags;
strncpy(s->s_id, bdevname(bdev), sizeof(s->s_id));
s->s_old_blocksize = block_size(dev);
s->s_old_blocksize = block_size(bdev);
sb_set_blocksize(s, s->s_old_blocksize);
error = fill_super(s, data, flags & MS_VERBOSE ? 1 : 0);
if (error) {
......@@ -540,7 +540,7 @@ void kill_block_super(struct super_block *sb)
{
struct block_device *bdev = sb->s_bdev;
generic_shutdown_super(sb);
set_blocksize(to_kdev_t(bdev->bd_dev), sb->s_old_blocksize);
set_blocksize(bdev, sb->s_old_blocksize);
bd_release(bdev);
blkdev_put(bdev, BDEV_FS);
}
......
......@@ -310,14 +310,13 @@ extern void blk_queue_hardsect_size(request_queue_t *q, unsigned short);
extern void blk_queue_segment_boundary(request_queue_t *q, unsigned long);
extern void blk_queue_assign_lock(request_queue_t *q, spinlock_t *);
extern void blk_queue_prep_rq(request_queue_t *q, prep_rq_fn *pfn);
extern unsigned long *blk_get_ra_pages(kdev_t kdev);
extern unsigned long *blk_get_ra_pages(struct block_device *bdev);
extern int blk_rq_map_sg(request_queue_t *, struct request *, struct scatterlist *);
extern void blk_dump_rq_flags(struct request *, char *);
extern void generic_unplug_device(void *);
extern int * blk_size[MAX_BLKDEV]; /* in units of 1024 bytes */
extern int * blksize_size[MAX_BLKDEV];
#define MAX_PHYS_SEGMENTS 128
#define MAX_HW_SEGMENTS 128
......@@ -335,7 +334,6 @@ extern inline void blk_clear(int major)
#if 0
blk_size_in_bytes[major] = NULL;
#endif
blksize_size[major] = NULL;
}
extern inline int queue_hardsect_size(request_queue_t *q)
......@@ -372,17 +370,9 @@ extern inline unsigned int blksize_bits(unsigned int size)
return bits;
}
extern inline unsigned int block_size(kdev_t dev)
extern inline unsigned int block_size(struct block_device *bdev)
{
int retval = BLOCK_SIZE;
int major = major(dev);
if (blksize_size[major]) {
int minor = minor(dev);
if (blksize_size[major][minor])
retval = blksize_size[major][minor];
}
return retval;
return bdev->bd_block_size;
}
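block_size() is now a plain accessor on the block_device; combined with blksize_bits() it gives the per-block shift without consulting any global table. A small usage sketch (helper name hypothetical):

/* Hypothetical helper: how many blocks fit in one page. */
static inline unsigned blocks_per_page(struct block_device *bdev)
{
	return PAGE_SIZE >> blksize_bits(block_size(bdev));
}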
static inline loff_t blkdev_size_in_bytes(kdev_t dev)
......
......@@ -151,10 +151,6 @@ void create_empty_buffers(struct page *, unsigned long,
void end_buffer_io_sync(struct buffer_head *bh, int uptodate);
void buffer_insert_list(spinlock_t *lock,
struct buffer_head *, struct list_head *);
struct buffer_head *get_hash_table(kdev_t dev, sector_t block, int size);
struct buffer_head *getblk(kdev_t dev, sector_t block, int size);
struct buffer_head *bread(kdev_t dev, int block, int size);
/* reiserfs_writepage needs this */
void set_buffer_async_io(struct buffer_head *bh);
......
......@@ -343,6 +343,7 @@ struct block_device {
void * bd_holder;
int bd_holders;
struct block_device * bd_contains;
unsigned bd_block_size;
};
struct inode {
......@@ -1259,7 +1260,7 @@ extern int submit_bh(int, struct buffer_head *);
struct bio;
extern int submit_bio(int, struct bio *);
extern int is_read_only(kdev_t);
extern int set_blocksize(kdev_t, int);
extern int set_blocksize(struct block_device *, int);
extern int sb_set_blocksize(struct super_block *, int);
extern int sb_min_blocksize(struct super_block *, int);
......
......@@ -67,6 +67,7 @@ typedef enum {
IRDA_AIRPORT_DONGLE = 6,
IRDA_OLD_BELKIN_DONGLE = 7,
IRDA_EP7211_IR = 8,
IRDA_MCP2120_DONGLE = 9,
} IRDA_DONGLE;
/* Protocol types to be used for SOCK_DGRAM */
......
......@@ -79,6 +79,7 @@ struct swap_info_struct {
unsigned int flags;
spinlock_t sdev_lock;
struct file *swap_file;
unsigned old_block_size;
unsigned short * swap_map;
unsigned int lowest_bit;
unsigned int highest_bit;
......
......@@ -31,8 +31,6 @@
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/ppp_defs.h>
#include <linux/ppp-comp.h>
#include <linux/timer.h>
#include <net/irda/irlap_event.h>
......@@ -52,8 +50,32 @@
#define CBROADCAST 0xfe /* Connection broadcast address */
#define XID_FORMAT 0x01 /* Discovery XID format */
/* Nobody seems to use this constant. */
#define LAP_WINDOW_SIZE 8
#define LAP_MAX_QUEUE 10
/* We keep the LAP queue very small to minimise the amount of buffering.
* This improves latency and reduces resource consumption.
* This works only because we have synchronous refilling of IrLAP through
* the flow control mechanism (via scheduler and IrTTP).
* 2 buffers is the minimum we can work with, one that we send while polling
* IrTTP, and another to know that we should not send the pf bit.
* Jean II */
#define LAP_HIGH_THRESHOLD 2
/* Some rare non-TTP clients don't implement flow control, and
* so don't comply with the above limit (nor with this one).
* For IAP and management, it doesn't matter, because they never transmit much.
* For IrLPT, this should be fixed.
* - Jean II */
#define LAP_MAX_QUEUE 10
/* Please note that all IrDA management frames (LMP/TTP conn req/disc and
* IAS queries) fall in the second category and are sent to LAP even if TTP
* is stopped. This means that those frames will wait for a maximum of
* two (2) data frames before being sent on the "wire", which speeds up
* new socket setup when the link is saturated.
* Same story for two sockets competing for the medium : if one saturates
* the LAP, when the other wants to transmit it only has to wait for a
* maximum of three (3) packets (2 + one scheduling), which improves the
* performance of delay-sensitive applications.
* Jean II */
#define NR_EXPECTED 1
#define NR_UNEXPECTED 0
......@@ -216,4 +238,24 @@ void irlap_apply_connection_parameters(struct irlap_cb *self, int now);
#define IRLAP_GET_HEADER_SIZE(self) (LAP_MAX_HEADER)
#define IRLAP_GET_TX_QUEUE_LEN(self) skb_queue_len(&self->txq)
/* Return TRUE if the node is in primary mode (i.e. master)
* - Jean II */
static inline int irlap_is_primary(struct irlap_cb *self)
{
int ret;
switch(self->state) {
case LAP_XMIT_P:
case LAP_NRM_P:
ret = 1;
break;
case LAP_XMIT_S:
case LAP_NRM_S:
ret = 0;
break;
default:
ret = -1;
}
return(ret);
}
#endif
......@@ -132,6 +132,7 @@ struct lap_cb {
struct irlap_cb *irlap; /* Instance of IrLAP layer */
hashbin_t *lsaps; /* LSAP associated with this link */
struct lsap_cb *flow_next; /* Next lsap to be polled for Tx */
__u8 caddr; /* Connection address */
__u32 saddr; /* Source device address */
......@@ -235,6 +236,7 @@ void irlmp_connless_data_indication(struct lsap_cb *, struct sk_buff *);
void irlmp_status_request(void);
void irlmp_status_indication(struct lap_cb *, LINK_STATUS link, LOCK_STATUS lock);
void irlmp_flow_indication(struct lap_cb *self, LOCAL_FLOW flow);
int irlmp_slsap_inuse(__u8 slsap);
__u8 irlmp_find_free_slsap(void);
......@@ -252,7 +254,9 @@ extern struct irlmp_cb *irlmp;
static inline hashbin_t *irlmp_get_cachelog(void) { return irlmp->cachelog; }
static inline int irlmp_get_lap_tx_queue_len(struct lsap_cb *self)
/* Check if LAP queue is full.
* Used by IrTTP for flow control, see comments in irlap.h - Jean II */
static inline int irlmp_lap_tx_queue_full(struct lsap_cb *self)
{
if (self == NULL)
return 0;
......@@ -261,7 +265,7 @@ static inline int irlmp_get_lap_tx_queue_len(struct lsap_cb *self)
if (self->lap->irlap == NULL)
return 0;
return IRLAP_GET_TX_QUEUE_LEN(self->lap->irlap);
return(IRLAP_GET_TX_QUEUE_LEN(self->lap->irlap) >= LAP_HIGH_THRESHOLD);
}
/* After doing an irlmp_dup(), this gets one of the two sockets back into
......@@ -274,6 +278,8 @@ static inline void irlmp_listen(struct lsap_cb *self)
self->dlsap_sel = LSAP_ANY;
self->lap = NULL;
self->lsap_state = LSAP_DISCONNECTED;
/* Started when we received the LM_CONNECT_INDICATION */
del_timer(&self->watchdog_timer);
}
#endif
......@@ -42,11 +42,48 @@
#define TTP_PARAMETERS 0x80
#define TTP_MORE 0x80
#define DEFAULT_INITIAL_CREDIT 14
#define TTP_LOW_THRESHOLD 4
#define TTP_HIGH_THRESHOLD 10
#define TTP_MAX_QUEUE 14
/* Transmission queue sizes */
/* Worst case scenario, two window of data - Jean II */
#define TTP_TX_MAX_QUEUE 14
/* We need to keep at least 5 frames to make sure that we can refill
* the LAP layer appropriately. LAP keeps only two buffers, and we need
* to have 7 to make a full window - Jean II */
#define TTP_TX_LOW_THRESHOLD 5
/* Most clients are synchronous with respect to flow control, so we can
* keep a low number of Tx buffers in TTP - Jean II */
#define TTP_TX_HIGH_THRESHOLD 7
/* Receive queue sizes */
/* Minimum number of credits that the peer should hold.
* If the peer holds credits for fewer than 9 frames, we will explicitly send
* it some credits (through irttp_give_credit() and a specific frame).
* Note that when we give credits it's likely that they won't be sent in
* this LAP window, but in the next one. So, we make sure that the peer
* has something to send while waiting for credits (one LAP window == 7
* + 1 frames while it processes the credits). - Jean II */
#define TTP_RX_MIN_CREDIT 8
/* This is the default maximum number of credits held by the peer, so the
* default maximum number of frames it can send us before needing a flow
* control answer from us (this may be negotiated differently at TSAP setup).
* We want to minimise the number of times we have to explicitly send some
* credit to the peer, hoping we can piggyback it on the return data. In
* particular, it doesn't make sense for us to send credit more than once
* per LAP window.
* Moreover, giving credits has some latency, so we need strictly more than
* a LAP window, otherwise we may already have credits in our Tx queue.
* But on the other hand, we don't want to keep too many Rx buffers here
* before starting to flow control the other end, so make it exactly one
* LAP window + 1 + MIN_CREDITS. - Jean II */
#define TTP_RX_DEFAULT_CREDIT 16
/* Maximum number of credits we can allow the peer to have, and therefore
* maximum Rx queue size.
* Note that we try to deliver packets to the higher layer every time we
* receive something, so in normal mode the Rx queue will never contain
* more than one or two packets. - Jean II */
#define TTP_RX_MAX_CREDIT 21
/* What clients should use when calling ttp_open_tsap() */
#define DEFAULT_INITIAL_CREDIT TTP_RX_DEFAULT_CREDIT
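The constants above follow directly from the usual 7-frame LAP window; a worked check of the arithmetic (comment only, values taken from this header):

/* Worked example, assuming the usual 7-frame LAP window:
 * TTP_RX_DEFAULT_CREDIT = 7 (one window) + 1 + 8 (TTP_RX_MIN_CREDIT) = 16,
 * so the peer can keep a full window in flight plus the round trip it
 * takes us to grant more credit.  TTP_TX_HIGH_THRESHOLD = 7 is exactly
 * one window, and TTP_TX_LOW_THRESHOLD = 5 leaves the 2 frames that
 * LAP itself buffers, again adding up to a full window. */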
/* Some priorities for disconnect requests */
#define P_NORMAL 0
......@@ -90,7 +127,7 @@ struct tsap_cb {
struct net_device_stats stats;
struct timer_list todo_timer;
__u32 max_seg_size; /* Max data that fit into an IrLAP frame */
__u8 max_header_size;
......@@ -131,6 +168,7 @@ int irttp_disconnect_request(struct tsap_cb *self, struct sk_buff *skb,
void irttp_flow_request(struct tsap_cb *self, LOCAL_FLOW flow);
void irttp_status_indication(void *instance,
LINK_STATUS link, LOCK_STATUS lock);
void irttp_flow_indication(void *instance, void *sap, LOCAL_FLOW flow);
struct tsap_cb *irttp_dup(struct tsap_cb *self, void *instance);
static __inline __u32 irttp_get_saddr(struct tsap_cb *self)
......@@ -159,6 +197,18 @@ static inline void irttp_listen(struct tsap_cb *self)
self->dtsap_sel = LSAP_ANY;
}
/* Return TRUE if the node is in primary mode (i.e. master)
* - Jean II */
static inline int irttp_is_primary(struct tsap_cb *self)
{
if ((self == NULL) ||
(self->lsap == NULL) ||
(self->lsap->lap == NULL) ||
(self->lsap->lap->irlap == NULL))
return -2;
return(irlap_is_primary(self->lsap->lap->irlap));
}
extern struct irttp_cb *irttp;
#endif /* IRTTP_H */
......@@ -144,6 +144,7 @@ EXPORT_SYMBOL(force_delete);
EXPORT_SYMBOL(follow_up);
EXPORT_SYMBOL(follow_down);
EXPORT_SYMBOL(lookup_mnt);
EXPORT_SYMBOL(path_lookup);
EXPORT_SYMBOL(path_init);
EXPORT_SYMBOL(path_walk);
EXPORT_SYMBOL(path_release);
......@@ -317,7 +318,6 @@ EXPORT_SYMBOL(tty_unregister_driver);
EXPORT_SYMBOL(tty_std_termios);
/* block device driver support */
EXPORT_SYMBOL(blksize_size);
EXPORT_SYMBOL(blk_size);
EXPORT_SYMBOL(blk_dev);
EXPORT_SYMBOL(is_read_only);
......
......@@ -14,6 +14,7 @@
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/shm.h>
#include <linux/blkdev.h>
#include <linux/compiler.h>
#include <asm/pgtable.h>
......@@ -787,8 +788,12 @@ asmlinkage long sys_swapoff(const char * specialfile)
swap_device_unlock(p);
swap_list_unlock();
vfree(swap_map);
if (S_ISBLK(swap_file->f_dentry->d_inode->i_mode))
bd_release(swap_file->f_dentry->d_inode->i_bdev);
if (S_ISBLK(swap_file->f_dentry->d_inode->i_mode)) {
struct block_device *bdev;
bdev = swap_file->f_dentry->d_inode->i_bdev;
set_blocksize(bdev, p->old_block_size);
bd_release(bdev);
}
filp_close(swap_file, NULL);
err = 0;
......@@ -879,6 +884,7 @@ asmlinkage long sys_swapon(const char * specialfile, int swap_flags)
nr_swapfiles = type+1;
p->flags = SWP_USED;
p->swap_file = NULL;
p->old_block_size = 0;
p->swap_map = NULL;
p->lowest_bit = 0;
p->highest_bit = 0;
......@@ -914,7 +920,8 @@ asmlinkage long sys_swapon(const char * specialfile, int swap_flags)
bdev = NULL;
goto bad_swap;
}
error = set_blocksize(swap_file->f_dentry->d_inode->i_rdev,
p->old_block_size = block_size(bdev);
error = set_blocksize(swap_file->f_dentry->d_inode->i_bdev,
PAGE_SIZE);
if (error < 0)
goto bad_swap;
......@@ -1066,8 +1073,10 @@ asmlinkage long sys_swapon(const char * specialfile, int swap_flags)
error = 0;
goto out;
bad_swap:
if (bdev)
if (bdev) {
set_blocksize(bdev, p->old_block_size);
bd_release(bdev);
}
bad_swap_2:
swap_list_lock();
swap_map = p->swap_map;
......
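The swap changes above bracket the PAGE_SIZE blocksize with a save and a restore; condensed to a sketch (p and bdev as in sys_swapon(), helper name hypothetical):

/* Hypothetical condensation of the swapon/swapoff pairing. */
static int example_swap_blocksize(struct swap_info_struct *p,
				  struct block_device *bdev)
{
	int error;

	p->old_block_size = block_size(bdev);	/* remembered for swapoff */
	error = set_blocksize(bdev, PAGE_SIZE);	/* swap I/O is page-sized */
	if (error < 0)				/* undo, as in bad_swap: */
		set_blocksize(bdev, p->old_block_size);
	return error;
}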
......@@ -568,15 +568,17 @@ static int irda_find_lsap_sel(struct irda_sock *self, char *name)
if(self->iriap == NULL)
return -ENOMEM;
/* Treat unexpected signals as disconnect */
/* Treat unexpected wakeup as disconnect */
self->errno = -EHOSTUNREACH;
/* Query remote LM-IAS */
iriap_getvaluebyclass_request(self->iriap, self->saddr, self->daddr,
name, "IrDA:TinyTP:LsapSel");
/* Wait for answer (if not already failed) */
if(self->iriap != NULL)
interruptible_sleep_on(&self->query_wait);
/* Wait for answer, if not yet finished (or failed) */
if (wait_event_interruptible(self->query_wait, (self->iriap==NULL)))
/* Treat signals as disconnect */
return -EHOSTUNREACH;
/* Check what happened */
if (self->errno)
......@@ -877,16 +879,47 @@ static int irda_accept(struct socket *sock, struct socket *newsock, int flags)
* The read queue this time is holding sockets ready to use
* hooked into the SABM we saved
*/
do {
if ((skb = skb_dequeue(&sk->receive_queue)) == NULL) {
if (flags & O_NONBLOCK)
return -EWOULDBLOCK;
interruptible_sleep_on(sk->sleep);
if (signal_pending(current))
return -ERESTARTSYS;
/*
* We can perform the accept only if there is incoming data
* on the listening socket.
* So, we will block the caller until we receive any data.
* If the caller was waiting on select() or poll() before
* calling us, the data is waiting for us ;-)
* Jean II
*/
skb = skb_dequeue(&sk->receive_queue);
if (skb == NULL) {
int ret = 0;
DECLARE_WAITQUEUE(waitq, current);
/* Non blocking operation */
if (flags & O_NONBLOCK)
return -EWOULDBLOCK;
/* The following code is a cut'n'paste of the
* wait_event_interruptible() macro.
* We don't use the macro because the condition has
* side effects : we want to make sure that only one
* skb gets dequeued - Jean II */
add_wait_queue(sk->sleep, &waitq);
for (;;) {
set_current_state(TASK_INTERRUPTIBLE);
skb = skb_dequeue(&sk->receive_queue);
if (skb != NULL)
break;
if (!signal_pending(current)) {
schedule();
continue;
}
ret = -ERESTARTSYS;
break;
}
} while (skb == NULL);
current->state = TASK_RUNNING;
remove_wait_queue(sk->sleep, &waitq);
if(ret)
return -ERESTARTSYS;
}
newsk = newsock->sk;
newsk->state = TCP_ESTABLISHED;
......@@ -1024,19 +1057,9 @@ static int irda_connect(struct socket *sock, struct sockaddr *uaddr,
if (sk->state != TCP_ESTABLISHED && (flags & O_NONBLOCK))
return -EINPROGRESS;
/* Here, there is a race condition : the state may change between
* our test and the sleep, via irda_connect_confirm().
* The way to workaround that is to sleep with a timeout, so that
* we don't sleep forever and check the state when waking up.
* 50ms is plenty good enough, because the LAP is already connected.
* Jean II */
while (sk->state == TCP_SYN_SENT) {
interruptible_sleep_on_timeout(sk->sleep, HZ/20);
if (signal_pending(current)) {
return -ERESTARTSYS;
}
}
if (wait_event_interruptible(*(sk->sleep), (sk->state!=TCP_SYN_SENT)))
return -ERESTARTSYS;
if (sk->state != TCP_ESTABLISHED) {
sock->state = SS_UNCONNECTED;
return sock_error(sk); /* Always set at this point */
......@@ -1280,17 +1303,14 @@ static int irda_sendmsg(struct socket *sock, struct msghdr *msg, int len,
ASSERT(self != NULL, return -1;);
/* Check if IrTTP wants us to slow down */
while (self->tx_flow == FLOW_STOP) {
IRDA_DEBUG(2, __FUNCTION__ "(), IrTTP is busy, going to sleep!\n");
interruptible_sleep_on(sk->sleep);
/* Check if we are still connected */
if (sk->state != TCP_ESTABLISHED)
return -ENOTCONN;
/* Handle signals */
if (signal_pending(current))
return -ERESTARTSYS;
}
if (wait_event_interruptible(*(sk->sleep),
(self->tx_flow != FLOW_STOP || sk->state != TCP_ESTABLISHED)))
return -ERESTARTSYS;
/* Check if we are still connected */
if (sk->state != TCP_ESTABLISHED)
return -ENOTCONN;
/* Check that we don't send out too big frames */
if (len > self->max_data_size) {
......@@ -1382,14 +1402,23 @@ static int irda_recvmsg_dgram(struct socket *sock, struct msghdr *msg,
*
* Sleep until data has arrived. But check for races..
*
* The caller is expected to deal with the situation when we return
* due to pending signals. And even if not, the peeked skb might have
* been already dequeued due to concurrent operation.
* Currently irda_recvmsg_stream() is the only caller and is ok.
* Return 0 once a packet has arrived, -ERESTARTSYS if signal_pending()
* Only used once in irda_recvmsg_stream() -> inline
*/
static void irda_data_wait(struct sock *sk)
static inline int irda_data_wait(struct sock *sk)
{
int ret = 0;
if (!skb_peek(&sk->receive_queue)) {
set_bit(SOCK_ASYNC_WAITDATA, &sk->socket->flags);
interruptible_sleep_on(sk->sleep);
__wait_event_interruptible(*(sk->sleep),
(skb_peek(&sk->receive_queue)!=NULL), ret);
clear_bit(SOCK_ASYNC_WAITDATA, &sk->socket->flags);
}
return(ret);
}
/*
......@@ -1444,8 +1473,8 @@ static int irda_recvmsg_stream(struct socket *sock, struct msghdr *msg,
if (noblock)
return -EAGAIN;
irda_data_wait(sk);
if (signal_pending(current))
/* Block the process until data arrives */
if (irda_data_wait(sk))
return -ERESTARTSYS;
continue;
}
......@@ -2281,7 +2310,12 @@ static int irda_getsockopt(struct socket *sock, int level, int optname,
self->iriap = iriap_open(LSAP_ANY, IAS_CLIENT, self,
irda_getvalue_confirm);
/* Treat unexpected signals as disconnect */
if (self->iriap == NULL) {
kfree(ias_opt);
return -ENOMEM;
}
/* Treat unexpected wakeup as disconnect */
self->errno = -EHOSTUNREACH;
/* Query remote LM-IAS */
......@@ -2289,9 +2323,17 @@ static int irda_getsockopt(struct socket *sock, int level, int optname,
self->saddr, daddr,
ias_opt->irda_class_name,
ias_opt->irda_attrib_name);
/* Wait for answer (if not already failed) */
if(self->iriap != NULL)
interruptible_sleep_on(&self->query_wait);
/* Wait for answer, if not yet finished (or failed) */
if (wait_event_interruptible(self->query_wait,
(self->iriap == NULL))) {
/* the pending request uses a copy of the ias_opt content,
* so we can free it regardless! */
kfree(ias_opt);
/* Treat signals as disconnect */
return -EHOSTUNREACH;
}
/* Check what happened */
if (self->errno)
{
......@@ -2348,12 +2390,14 @@ static int irda_getsockopt(struct socket *sock, int level, int optname,
irlmp_update_client(self->ckey, self->mask,
irda_selective_discovery_indication,
NULL, (void *) self);
/* Do some discovery (and also return cached results) */
irlmp_discovery_request(self->nslots);
/* Wait until a node is discovered */
if (!self->cachediscovery) {
int ret = 0;
IRDA_DEBUG(1, __FUNCTION__
"(), nothing discovered yet, going to sleep...\n");
......@@ -2362,9 +2406,12 @@ static int irda_getsockopt(struct socket *sock, int level, int optname,
self->watchdog.data = (unsigned long) self;
self->watchdog.expires = jiffies + (val * HZ/1000);
add_timer(&(self->watchdog));
self->errno = 0;
/* Wait for IR-LMP to call us back */
interruptible_sleep_on(&self->query_wait);
__wait_event_interruptible(self->query_wait,
(self->cachediscovery!=NULL || self->errno==-ETIME),
ret);
/* If watchdog is still activated, kill it! */
if(timer_pending(&(self->watchdog)))
......@@ -2372,6 +2419,9 @@ static int irda_getsockopt(struct socket *sock, int level, int optname,
IRDA_DEBUG(1, __FUNCTION__
"(), ...waking up !\n");
if (ret != 0)
return ret;
}
else
IRDA_DEBUG(1, __FUNCTION__
......
......@@ -453,8 +453,21 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
*/
if (tty_hung_up_p(filp) ||
(self->flags & ASYNC_CLOSING)) {
if (self->flags & ASYNC_CLOSING)
interruptible_sleep_on(&self->close_wait);
/* Hm, why are we blocking on ASYNC_CLOSING if we
* do return -EAGAIN/-ERESTARTSYS below anyway?
* IMHO it's either not needed in the first place
* or for some reason we need to make sure the async
* closing has been finished - if so, wouldn't it
* probably be better to sleep uninterruptibly?
*/
if (wait_event_interruptible(self->close_wait, !(self->flags&ASYNC_CLOSING))) {
WARNING("%s - got signal while blocking on ASYNC_CLOSING!\n",
__FUNCTION__);
return -ERESTARTSYS;
}
/* MOD_DEC_USE_COUNT; "info->tty" will cause this? */
#ifdef SERIAL_DO_RESTART
return ((self->flags & ASYNC_HUP_NOTIFY) ?
......
......@@ -368,6 +368,12 @@ int irda_task_kick(struct irda_task *task)
* time to complete. We do it this hairy way since we may have been
* called from interrupt context, so it's not possible to use
* schedule_timeout()
* Two important notes :
* o Make sure you call irda_task_delete(task); in case you delete the
* calling instance.
* o No real need to lock when calling this function, but you may
* want to lock within the task handler.
* Jean II
*/
struct irda_task *irda_task_execute(void *instance,
IRDA_TASK_CALLBACK function,
......@@ -466,6 +472,9 @@ int irda_device_txqueue_empty(struct net_device *dev)
* Function irda_device_init_dongle (self, type, qos)
*
* Initialize attached dongle.
*
* Important : request_module requires us to call this function
* in process context with irqs enabled. - Jean II
*/
dongle_t *irda_device_dongle_init(struct net_device *dev, int type)
{
......@@ -477,6 +486,7 @@ dongle_t *irda_device_dongle_init(struct net_device *dev, int type)
#ifdef CONFIG_KMOD
{
char modname[32];
ASSERT(!in_interrupt(), return NULL;);
/* Try to load the module needed */
sprintf(modname, "irda-dongle-%d", type);
request_module(modname);
......
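Given the process-context requirement noted above, a dongle is attached from a driver's probe path, never from an interrupt handler. A minimal call sketch tying in the MCP2120 dongle type added earlier in this patch (function name hypothetical):

/* Hypothetical probe-path snippet: request_module() may sleep,
 * so this must run in process context with irqs enabled. */
static int example_attach_mcp2120(struct net_device *dev)
{
	dongle_t *dongle = irda_device_dongle_init(dev, IRDA_MCP2120_DONGLE);
	if (!dongle)
		return -ENODEV;
	return 0;
}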
......@@ -253,19 +253,45 @@ void irlap_do_event(struct irlap_cb *self, IRLAP_EVENT event,
case LAP_XMIT_P: /* FALLTHROUGH */
case LAP_XMIT_S:
/*
* We just received the pf bit and are at the beginning
* of a new LAP transmit window.
* Check if there are any queued data frames, and do not
* try to disconnect the link if we send any data frames, since
* that will change the state away from XMIT
*/
IRDA_DEBUG(2, __FUNCTION__ "() : queue len = %d\n",
skb_queue_len(&self->txq));
if (skb_queue_len(&self->txq)) {
/* Prevent race conditions with irlap_data_request() */
self->local_busy = TRUE;
/* Theory of operation.
* We send frames until we fill the window or
* reach line capacity. Those frames will queue up
* in the device queue, and the driver will slowly
* send them.
* After each frame that we send, we poll the higher
* layer for more data. It's the right time to do
* that because the link layer needs to perform the mtt
* and then send the first frame, so we can afford
* to spend a bit of time in kernel space.
* The explicit flow indication allows us to minimise
* buffers (== lower latency), to avoid higher-layer
* polling via timers (== fewer context switches) and
* to implement a crude scheduler - Jean II */
/* Try to send away all queued data frames */
while ((skb = skb_dequeue(&self->txq)) != NULL) {
/* Send one frame */
ret = (*state[self->state])(self, SEND_I_CMD,
skb, NULL);
kfree_skb(skb);
/* Poll the higher layers for one more frame */
irlmp_flow_indication(self->notify.instance,
FLOW_START);
if (ret == -EPROTO)
break; /* Try again later! */
}
......
......@@ -236,6 +236,16 @@ void irlmp_close_lsap(struct lsap_cb *self)
lap = self->lap;
if (lap) {
ASSERT(lap->magic == LMP_LAP_MAGIC, return;);
/* We might close an LSAP before it has completed the
* connection setup. In that case, higher layers won't
* send a proper disconnect request. Harmless, except
* that we will forget to close the LAP... - Jean II */
if(self->lsap_state != LSAP_DISCONNECTED) {
self->lsap_state = LSAP_DISCONNECTED;
irlmp_do_lap_event(self->lap,
LM_LAP_DISCONNECT_REQUEST, NULL);
}
/* Now, remove from the link */
lsap = hashbin_remove(lap->lsaps, (int) self, NULL);
}
self->lap = NULL;
......@@ -1212,6 +1222,72 @@ void irlmp_status_indication(struct lap_cb *self,
}
}
/*
* Receive flow control indication from LAP.
* LAP wants us to send it one more frame. We implement a simple round-
* robin scheduler between the active sockets so that we get a bit of
* fairness. Note that the round robin is far from perfect, but it's
* better than nothing.
* We then poll the selected socket so that we can do synchronous
* refilling of IrLAP (which allows us to minimise the number of buffers).
* Jean II
*/
void irlmp_flow_indication(struct lap_cb *self, LOCAL_FLOW flow)
{
struct lsap_cb *next;
struct lsap_cb *curr;
int lsap_todo;
ASSERT(self->magic == LMP_LAP_MAGIC, return;);
ASSERT(flow == FLOW_START, return;);
/* Get the number of lsaps. That's the only safe way to know
* that we have looped around... - Jean II */
lsap_todo = HASHBIN_GET_SIZE(self->lsaps);
IRDA_DEBUG(4, __FUNCTION__ "() : %d lsaps to scan\n", lsap_todo);
/* Poll the lsaps in order until the queue is full or until we
* have tried them all.
* Most often, the current LSAP will have something to send,
* so we will go through this loop only once. - Jean II */
while((lsap_todo--) &&
(IRLAP_GET_TX_QUEUE_LEN(self->irlap) < LAP_HIGH_THRESHOLD)) {
/* Try to find the next lsap we should poll. */
next = self->flow_next;
if(next != NULL) {
/* Note that if there is only one LSAP on the LAP
* (most common case), self->flow_next is always NULL,
* so we always avoid this loop. - Jean II */
IRDA_DEBUG(4, __FUNCTION__ "() : searching my LSAP\n");
/* We look again in hashbins, because the lsap
* might have gone away... - Jean II */
curr = (struct lsap_cb *) hashbin_get_first(self->lsaps);
while((curr != NULL ) && (curr != next))
curr = (struct lsap_cb *) hashbin_get_next(self->lsaps);
} else
curr = NULL;
/* If we have no lsap, restart from first one */
if(curr == NULL)
curr = (struct lsap_cb *) hashbin_get_first(self->lsaps);
/* Uh-oh... Paranoia */
if(curr == NULL)
break;
/* Next time, we will get the next one (or the first one) */
self->flow_next = (struct lsap_cb *) hashbin_get_next(self->lsaps);
IRDA_DEBUG(4, __FUNCTION__ "() : curr is %p, next was %p and is now %p, still %d to go - queue len = %d\n", curr, next, self->flow_next, lsap_todo, IRLAP_GET_TX_QUEUE_LEN(self->irlap));
/* Inform lsap user that it can send one more packet. */
if (curr->notify.flow_indication != NULL)
curr->notify.flow_indication(curr->notify.instance,
curr, flow);
else
IRDA_DEBUG(1, __FUNCTION__ "(), no handler\n");
}
}
/*
* Function irlmp_hint_to_service (hint)
*
......
......@@ -217,6 +217,11 @@
* Better fix in irnet_disconnect_indication() :
* - if connected, kill pppd via hangup.
* - if not connected, reenable ppp Tx, which triggers IrNET retry.
*
* v12 - 10.4.02 - Jean II
* o Fix race condition in irnet_connect_indication().
* If the socket was already trying to connect, drop the old connection
* and use the new one only if acting as primary. See comments.
*/
/***************************** INCLUDES *****************************/
......
......@@ -1340,46 +1340,80 @@ irnet_connect_indication(void * instance,
return;
}
/* Socket connecting ?
* Clear up flag : prevent irnet_disconnect_indication() from messing up the tsap */
if(test_and_clear_bit(0, &new->ttp_connect))
{
/* The socket is trying to connect to the other end and may have sent
* an IrTTP connection request and is waiting for a connection response
* (that may never come).
* Now, the pain is that the socket may have opened a tsap and is
* waiting on it, while the other end is trying to connect to it on
* another tsap.
*/
DERROR(IRDA_CB_ERROR, "Socket already connecting. Ouch !\n");
/* The following code is a bit tricky, so need comments ;-)
*/
/* If ttp_connect is set, the socket is trying to connect to the other
* end and may have sent an IrTTP connection request and is waiting for
* a connection response (that may never come).
* Now, the pain is that the socket may have opened a tsap and is
* waiting on it, while the other end is trying to connect to it on
* another tsap.
* Because IrNET can be peer to peer, we need to workaround this.
* Furthermore, the way the irnetd script is implemented, the
* target will create a second IrNET connection back to the
* originator and expect the originator to bind this new connection
* to the original PPPD instance.
* And of course, if we don't use irnetd, we can have a race when
* both sides try to connect simultaneously, which could leave both
* connections half closed (yuck).
* Conclusions :
* 1) The "originator" must accept the new connection and get rid
* of the old one so that irnetd works
* 2) One side must deny the new connection to avoid races,
* but both sides must agree on which side it is...
* Most often, the originator is primary at the LAP layer.
* Jean II
*/
/* Now, let's look at the way I wrote the test...
* We need to clear up the ttp_connect flag atomically to prevent
* irnet_disconnect_indication() from messing up the tsap we are going to close.
* We want to clear the ttp_connect flag only if we close the tsap,
* otherwise we will never close it, so we need to check for primary
* *before* doing the test on the flag.
* And of course, ALLOW_SIMULT_CONNECT can disable this entirely...
* Jean II
*/
/* Socket already connecting ? On primary ? */
if(0
#ifdef ALLOW_SIMULT_CONNECT
/* Cleanup the TSAP if necessary - IrIAP will be cleaned up later */
|| ((irttp_is_primary(server->tsap) == 1) /* primary */
&& (test_and_clear_bit(0, &new->ttp_connect)))
#endif /* ALLOW_SIMULT_CONNECT */
)
{
DERROR(IRDA_CB_ERROR, "Socket already connecting, but going to reuse it !\n");
/* Cleanup the old TSAP if necessary - IrIAP will be cleaned up later */
if(new->tsap != NULL)
{
/* Close the connection the new socket was attempting.
* This seems to be safe... */
/* Close the old connection the new socket was attempting,
* so that we can hook it up to the new connection.
* It's now safe to do it... */
irttp_close_tsap(new->tsap);
new->tsap = NULL;
}
/* Note : no return, fall through... */
#else /* ALLOW_SIMULT_CONNECT */
irnet_disconnect_server(server, skb);
return;
#endif /* ALLOW_SIMULT_CONNECT */
}
else
/* If socket is not connecting or connected, tsap should be NULL */
if(new->tsap != NULL)
{
/* If we are here, we are also in irnet_disconnect_indication(),
* and it's a nice race condition... On the other hand, we can't be
* in irda_irnet_destroy() otherwise we would not have found the
* socket in the hashbin. */
/* Better get out of here, otherwise we will mess up tsaps ! */
DERROR(IRDA_CB_ERROR, "Race condition detected, abort connect...\n");
irnet_disconnect_server(server, skb);
return;
}
{
/* Three options :
* 1) socket was not connecting or connected : ttp_connect should be 0.
* 2) we don't want to connect the socket because we are secondary or
* ALLOW_SIMULT_CONNECT is undefined. ttp_connect should be 1.
* 3) we are halfway through irnet_disconnect_indication(), and it's a
* nice race condition... Fortunately, we can detect that by checking
* if tsap is still alive. On the other hand, we can't be in
* irda_irnet_destroy() otherwise we would not have found this
* socket in the hashbin.
* Jean II */
if((test_bit(0, &new->ttp_connect)) || (new->tsap != NULL))
{
/* Don't mess with this socket, somebody else is in charge... */
DERROR(IRDA_CB_ERROR, "Race condition detected, socket in use, abort connect...\n");
irnet_disconnect_server(server, skb);
return;
}
}
/* So : at this point, we have a socket, and it is idle. Good ! */
irnet_connect_socket(server, new, qos, max_sdu_size, max_header_size);
......
......@@ -28,6 +28,7 @@
*
********************************************************************/
#include <linux/types.h>
#include <asm/unaligned.h>
#include <asm/byteorder.h>
......