Commit 64994feb authored by David S. Miller

Merge davem@nuts.davemloft.net:/disk1/BK/sparc-2.6

into kernel.bkbits.net:/home/davem/sparc-2.6
parents 7208d2ce fe0353f0
......@@ -74,7 +74,7 @@ This driver provides the following features:
3. The CDROM drive should be connected to the host on an IDE
interface. Each interface on a system is defined by an I/O port
address and an IRQ number, the standard assignments being
0x170 and 14 for the primary interface and 0x1f0 and 15 for the
0x1f0 and 14 for the primary interface and 0x170 and 15 for the
secondary interface. Each interface can control up to two devices,
where each device can be a hard drive, a CDROM drive, a floppy drive,
or a tape drive. The two devices on an interface are called `master'
......@@ -268,8 +268,8 @@ b. Timeout/IRQ errors.
- Double-check your hardware configuration to make sure that the IRQ
number of your IDE interface matches what the driver expects.
(The usual assignments are 14 for the primary (0x170) interface
and 15 for the secondary (0x1f0) interface.) Also be sure that
(The usual assignments are 14 for the primary (0x1f0) interface
and 15 for the secondary (0x170) interface.) Also be sure that
you don't have some other hardware which might be conflicting with
the IRQ you're using. Also check the BIOS setup for your system;
some have the ability to disable individual IRQ levels, and I've
......
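A quick way to read the corrected documentation above: the primary IDE interface lives at I/O base 0x1f0 with IRQ 14, the secondary at 0x170 with IRQ 15. A minimal sketch of those defaults as a table (hypothetical struct and names, not part of the patch):

struct ide_legacy_defaults {
	unsigned long io_base;	/* command block base address */
	int irq;
};

static const struct ide_legacy_defaults ide_defaults[] = {
	{ 0x1f0, 14 },		/* primary interface   */
	{ 0x170, 15 },		/* secondary interface */
};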
......@@ -369,8 +369,8 @@ do_sys_ptrace(long request, long pid, long addr, long data,
/* Mark single stepping. */
child->thread_info->bpt_nsaved = -1;
clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
wake_up_process(child);
child->exit_code = data;
wake_up_process(child);
/* give it a chance to run. */
ret = 0;
goto out;
......
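The ptrace hunk above only swaps two lines, but the order matters: once woken, the child reads ->exit_code as the signal to deliver, so the value has to be published before wake_up_process() lets it run. Restating the corrected sequence with comments:

child->exit_code = data;	/* make the signal number visible first */
wake_up_process(child);		/* only then allow the child to resume  */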
......@@ -694,7 +694,7 @@ static inline void move_irq(int irq) { }
#endif /* CONFIG_IRQBALANCE */
#ifndef CONFIG_SMP
void send_IPI_self(int vector)
void fastcall send_IPI_self(int vector)
{
unsigned int cfg;
......
......@@ -493,7 +493,7 @@ int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
* the task-switch, and shows up in ret_from_fork in entry.S,
* for example.
*/
struct task_struct * __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
struct thread_struct *prev = &prev_p->thread,
*next = &next_p->thread;
......
......@@ -551,7 +551,7 @@ handle_signal(unsigned long sig, siginfo_t *info, sigset_t *oldset,
* want to handle. Thus you cannot kill init even with a SIGKILL even by
* mistake.
*/
int do_signal(struct pt_regs *regs, sigset_t *oldset)
int fastcall do_signal(struct pt_regs *regs, sigset_t *oldset)
{
siginfo_t info;
int signr;
......
......@@ -150,7 +150,7 @@ inline void __send_IPI_shortcut(unsigned int shortcut, int vector)
apic_write_around(APIC_ICR, cfg);
}
void send_IPI_self(int vector)
void fastcall send_IPI_self(int vector)
{
__send_IPI_shortcut(APIC_DEST_SELF, vector);
}
......
......@@ -95,7 +95,7 @@
#define VM86_REGS_SIZE2 (sizeof(struct kernel_vm86_regs) - VM86_REGS_SIZE1)
struct pt_regs * FASTCALL(save_v86_state(struct kernel_vm86_regs * regs));
struct pt_regs * save_v86_state(struct kernel_vm86_regs * regs)
struct pt_regs * fastcall save_v86_state(struct kernel_vm86_regs * regs)
{
struct tss_struct *tss;
struct pt_regs *ret;
......
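The four hunks above add the same annotation in different files: the definitions of send_IPI_self(), __switch_to(), do_signal() and save_v86_state() now carry `fastcall` so they agree with their FASTCALL() prototypes. On i386 both macros expand to a register-argument calling convention (roughly regparm(3)); if only the prototype is annotated, caller and callee disagree about where the arguments live. A minimal sketch of the pattern, with the expansion stated as an assumption and a made-up function name:

/* assumed i386 expansion of the two macros */
#define FASTCALL(x)	x __attribute__((regparm(3)))
#define fastcall	__attribute__((regparm(3)))

int FASTCALL(demo_add(int a, int b, int c));	/* prototype: args in registers */

int fastcall demo_add(int a, int b, int c)	/* definition must say so too   */
{
	return a + b + c;
}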
......@@ -1277,20 +1277,6 @@ COMPATIBLE_IOCTL(SBPROF_ZBSTOP)
COMPATIBLE_IOCTL(SBPROF_ZBWAITFULL)
#endif /* CONFIG_SIBYTE_TBPROF */
#if defined(CONFIG_BLK_DEV_DM) || defined(CONFIG_BLK_DEV_DM_MODULE)
COMPATIBLE_IOCTL(DM_VERSION)
COMPATIBLE_IOCTL(DM_REMOVE_ALL)
COMPATIBLE_IOCTL(DM_DEV_CREATE)
COMPATIBLE_IOCTL(DM_DEV_REMOVE)
COMPATIBLE_IOCTL(DM_DEV_RELOAD)
COMPATIBLE_IOCTL(DM_DEV_SUSPEND)
COMPATIBLE_IOCTL(DM_DEV_RENAME)
COMPATIBLE_IOCTL(DM_DEV_DEPS)
COMPATIBLE_IOCTL(DM_DEV_STATUS)
COMPATIBLE_IOCTL(DM_TARGET_STATUS)
COMPATIBLE_IOCTL(DM_TARGET_WAIT)
#endif /* CONFIG_BLK_DEV_DM */
COMPATIBLE_IOCTL(MTIOCTOP) /* mtio.h ioctls */
HANDLE_IOCTL(MTIOCGET32, mt_ioctl_trans)
HANDLE_IOCTL(MTIOCPOS32, mt_ioctl_trans)
......
......@@ -300,10 +300,6 @@ config VIOCD
If you are running Linux on an IBM iSeries system and you want to
read a CD drive owned by OS/400, say Y here.
config VIOCD_AZTECH
bool "iSeries Virtual CD Aztech emulation"
depends on VIOCD
config VIOTAPE
tristate "iSeries Virtual Tape Support"
help
......
......@@ -43,6 +43,7 @@
#include <asm/iommu.h>
#include <asm/pci-bridge.h>
#include <asm/iSeries/iSeries_pci.h>
#include <asm/iSeries/vio.h>
#include <asm/machdep.h>
......@@ -58,11 +59,6 @@ static struct iSeries_Device_Node vio_dev_node = { .LogicalSlot = 0xFF, .iommu_
static struct pci_dev _veth_dev = { .sysdata = &veth_dev_node };
static struct pci_dev _vio_dev = { .sysdata = &vio_dev_node, .dev.bus = &pci_bus_type };
/*
* I wonder what the deal is with these. Nobody uses them. Why do they
* exist? Why do we export them to modules? Why is this comment here, and
* why didn't I just delete them?
*/
struct pci_dev *iSeries_veth_dev = &_veth_dev;
struct device *iSeries_vio_dev = &_vio_dev.dev;
......
......@@ -38,10 +38,9 @@
#include <asm/iSeries/ItSpCommArea.h>
#include <asm/iSeries/iSeries_proc.h>
#include <asm/uaccess.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/bcd.h>
extern struct pci_dev *iSeries_vio_dev;
#include <asm/iSeries/vio.h>
/*
* This is the structure layout for the Machine Facilites LPAR event
......@@ -791,7 +790,8 @@ void mf_setCmdLine(const char *cmdline, int size, u64 side)
{
struct VspCmdData myVspCmd;
dma_addr_t dma_addr = 0;
char *page = pci_alloc_consistent(iSeries_vio_dev, size, &dma_addr);
char *page = dma_alloc_coherent(iSeries_vio_dev, size, &dma_addr,
GFP_ATOMIC);
if (page == NULL) {
printk(KERN_ERR "mf.c: couldn't allocate memory to set command line\n");
......@@ -809,7 +809,7 @@ void mf_setCmdLine(const char *cmdline, int size, u64 side)
mb();
(void)signal_vsp_instruction(&myVspCmd);
pci_free_consistent(iSeries_vio_dev, size, page, dma_addr);
dma_free_coherent(iSeries_vio_dev, size, page, dma_addr);
}
int mf_getCmdLine(char *cmdline, int *size, u64 side)
......@@ -819,8 +819,8 @@ int mf_getCmdLine(char *cmdline, int *size, u64 side)
int len = *size;
dma_addr_t dma_addr;
dma_addr = pci_map_single(iSeries_vio_dev, cmdline, len,
PCI_DMA_FROMDEVICE);
dma_addr = dma_map_single(iSeries_vio_dev, cmdline, len,
DMA_FROM_DEVICE);
memset(cmdline, 0, len);
memset(&myVspCmd, 0, sizeof(myVspCmd));
myVspCmd.cmd = 33;
......@@ -840,7 +840,7 @@ int mf_getCmdLine(char *cmdline, int *size, u64 side)
#endif
}
pci_unmap_single(iSeries_vio_dev, dma_addr, *size, PCI_DMA_FROMDEVICE);
dma_unmap_single(iSeries_vio_dev, dma_addr, *size, DMA_FROM_DEVICE);
return len;
}
......@@ -851,7 +851,8 @@ int mf_setVmlinuxChunk(const char *buffer, int size, int offset, u64 side)
struct VspCmdData myVspCmd;
int rc;
dma_addr_t dma_addr = 0;
char *page = pci_alloc_consistent(iSeries_vio_dev, size, &dma_addr);
char *page = dma_alloc_coherent(iSeries_vio_dev, size, &dma_addr,
GFP_ATOMIC);
if (page == NULL) {
printk(KERN_ERR "mf.c: couldn't allocate memory to set vmlinux chunk\n");
......@@ -876,7 +877,7 @@ int mf_setVmlinuxChunk(const char *buffer, int size, int offset, u64 side)
rc = -ENOMEM;
}
pci_free_consistent(iSeries_vio_dev, size, page, dma_addr);
dma_free_coherent(iSeries_vio_dev, size, page, dma_addr);
return rc;
}
......@@ -888,8 +889,8 @@ int mf_getVmlinuxChunk(char *buffer, int *size, int offset, u64 side)
int len = *size;
dma_addr_t dma_addr;
dma_addr = pci_map_single(iSeries_vio_dev, buffer, len,
PCI_DMA_FROMDEVICE);
dma_addr = dma_map_single(iSeries_vio_dev, buffer, len,
DMA_FROM_DEVICE);
memset(buffer, 0, len);
memset(&myVspCmd, 0, sizeof(myVspCmd));
myVspCmd.cmd = 32;
......@@ -907,7 +908,7 @@ int mf_getVmlinuxChunk(char *buffer, int *size, int offset, u64 side)
rc = -ENOMEM;
}
pci_unmap_single(iSeries_vio_dev, dma_addr, len, PCI_DMA_FROMDEVICE);
dma_unmap_single(iSeries_vio_dev, dma_addr, len, DMA_FROM_DEVICE);
return rc;
}
......
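The mf.c hunks above are a mechanical conversion from the PCI-specific DMA helpers to the generic DMA API, which takes a plain struct device * (here iSeries_vio_dev, now declared via <asm/iSeries/vio.h>) instead of a struct pci_dev *. A hedged summary of the correspondence, wrapped in hypothetical helpers:

#include <linux/dma-mapping.h>

/* pci_alloc_consistent(pdev, size, &handle)
 *	-> dma_alloc_coherent(dev, size, &handle, GFP_ATOMIC) */
static void *vio_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle)
{
	return dma_alloc_coherent(dev, size, handle, GFP_ATOMIC);
}

/* pci_free_consistent(pdev, size, cpu, handle)
 *	-> dma_free_coherent(dev, size, cpu, handle) */
static void vio_dma_free(struct device *dev, size_t size, void *cpu, dma_addr_t handle)
{
	dma_free_coherent(dev, size, cpu, handle);
}

/* Likewise pci_map_single(..., PCI_DMA_FROMDEVICE) becomes
 * dma_map_single(..., DMA_FROM_DEVICE), and pci_unmap_single()
 * becomes dma_unmap_single(). */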
......@@ -184,13 +184,13 @@ int ste_allocate(unsigned long ea)
/* Kernel or user address? */
if (REGION_ID(ea) >= KERNEL_REGION_ID) {
vsid = get_kernel_vsid(ea);
context = REGION_ID(ea);
context = KERNEL_CONTEXT(ea);
} else {
if (!current->mm)
return 1;
context = current->mm->context;
vsid = get_vsid(context, ea);
vsid = get_vsid(context.id, ea);
}
esid = GET_ESID(ea);
......@@ -223,7 +223,7 @@ static void preload_stab(struct task_struct *tsk, struct mm_struct *mm)
if (!IS_VALID_EA(pc) || (REGION_ID(pc) >= KERNEL_REGION_ID))
return;
vsid = get_vsid(mm->context, pc);
vsid = get_vsid(mm->context.id, pc);
__ste_allocate(pc_esid, vsid);
if (pc_esid == stack_esid)
......@@ -231,7 +231,7 @@ static void preload_stab(struct task_struct *tsk, struct mm_struct *mm)
if (!IS_VALID_EA(stack) || (REGION_ID(stack) >= KERNEL_REGION_ID))
return;
vsid = get_vsid(mm->context, stack);
vsid = get_vsid(mm->context.id, stack);
__ste_allocate(stack_esid, vsid);
if (pc_esid == unmapped_base_esid || stack_esid == unmapped_base_esid)
......@@ -240,7 +240,7 @@ static void preload_stab(struct task_struct *tsk, struct mm_struct *mm)
if (!IS_VALID_EA(unmapped_base) ||
(REGION_ID(unmapped_base) >= KERNEL_REGION_ID))
return;
vsid = get_vsid(mm->context, unmapped_base);
vsid = get_vsid(mm->context.id, unmapped_base);
__ste_allocate(unmapped_base_esid, vsid);
/* Order update */
......@@ -406,14 +406,14 @@ int slb_allocate(unsigned long ea)
/* Kernel or user address? */
if (REGION_ID(ea) >= KERNEL_REGION_ID) {
context = REGION_ID(ea);
context = KERNEL_CONTEXT(ea);
vsid = get_kernel_vsid(ea);
} else {
if (unlikely(!current->mm))
return 1;
context = current->mm->context;
vsid = get_vsid(context, ea);
vsid = get_vsid(context.id, ea);
}
esid = GET_ESID(ea);
......@@ -444,7 +444,7 @@ static void preload_slb(struct task_struct *tsk, struct mm_struct *mm)
if (!IS_VALID_EA(pc) || (REGION_ID(pc) >= KERNEL_REGION_ID))
return;
vsid = get_vsid(mm->context, pc);
vsid = get_vsid(mm->context.id, pc);
__slb_allocate(pc_esid, vsid, mm->context);
if (pc_esid == stack_esid)
......@@ -452,7 +452,7 @@ static void preload_slb(struct task_struct *tsk, struct mm_struct *mm)
if (!IS_VALID_EA(stack) || (REGION_ID(stack) >= KERNEL_REGION_ID))
return;
vsid = get_vsid(mm->context, stack);
vsid = get_vsid(mm->context.id, stack);
__slb_allocate(stack_esid, vsid, mm->context);
if (pc_esid == unmapped_base_esid || stack_esid == unmapped_base_esid)
......@@ -461,7 +461,7 @@ static void preload_slb(struct task_struct *tsk, struct mm_struct *mm)
if (!IS_VALID_EA(unmapped_base) ||
(REGION_ID(unmapped_base) >= KERNEL_REGION_ID))
return;
vsid = get_vsid(mm->context, unmapped_base);
vsid = get_vsid(mm->context.id, unmapped_base);
__slb_allocate(unmapped_base_esid, vsid, mm->context);
}
......
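The common thread in the stab.c and SLB hunks above (and in the hugetlbpage hunks further below) is that the ppc64 mm_context_t is no longer a bare number: get_vsid() now takes context.id, KERNEL_CONTEXT(ea) presumably builds such a context for kernel regions, and the low hugepage window becomes a field instead of a flag bit. A rough reconstruction of the layout, inferred only from the accesses in this diff:

/* hypothetical layout; field names come from the hunks, exact widths
 * and any additional members are unknown */
typedef struct {
	unsigned long id;		/* the old bare context value          */
	unsigned long low_hpages;	/* replaces the CONTEXT_LOW_HPAGES bit */
} mm_context_t;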
......@@ -35,7 +35,6 @@
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/proc_fs.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/wait.h>
......@@ -49,8 +48,6 @@
#include <asm/iSeries/iSeries_proc.h>
#include <asm/iSeries/vio.h>
extern struct device *iSeries_vio_dev;
/* Status of the path to each other partition in the system.
* This is overkill, since we will only ever establish connections
* to our hosting partition and the primary partition on the system.
......
......@@ -265,7 +265,7 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
if (mm == NULL)
return 1;
vsid = get_vsid(mm->context, ea);
vsid = get_vsid(mm->context.id, ea);
break;
case IO_REGION_ID:
mm = &ioremap_mm;
......
......@@ -244,7 +244,7 @@ static int open_32bit_htlbpage_range(struct mm_struct *mm)
struct vm_area_struct *vma;
unsigned long addr;
if (mm->context & CONTEXT_LOW_HPAGES)
if (mm->context.low_hpages)
return 0; /* The window is already open */
/* Check no VMAs are in the region */
......@@ -281,7 +281,7 @@ static int open_32bit_htlbpage_range(struct mm_struct *mm)
/* FIXME: do we need to scan for PTEs too? */
mm->context |= CONTEXT_LOW_HPAGES;
mm->context.low_hpages = 1;
/* the context change must make it to memory before the slbia,
* so that further SLB misses do the right thing. */
......@@ -589,7 +589,6 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
}
}
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
unsigned long len, unsigned long pgoff,
unsigned long flags)
......@@ -778,7 +777,7 @@ static void flush_hash_hugepage(mm_context_t context, unsigned long ea,
BUG_ON(hugepte_bad(pte));
BUG_ON(!in_hugepage_area(context, ea));
vsid = get_vsid(context, ea);
vsid = get_vsid(context.id, ea);
va = (vsid << 28) | (ea & 0x0fffffff);
vpn = va >> LARGE_PAGE_SHIFT;
......
......@@ -794,7 +794,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long ea,
if (!ptep)
return;
vsid = get_vsid(vma->vm_mm->context, ea);
vsid = get_vsid(vma->vm_mm->context.id, ea);
tmp = cpumask_of_cpu(smp_processor_id());
if (cpus_equal(vma->vm_mm->cpu_vm_mask, tmp))
......
......@@ -62,7 +62,7 @@ void hpte_update(pte_t *ptep, unsigned long pte, int wrprot)
addr = ptep_to_address(ptep);
if (REGION_ID(addr) == USER_REGION_ID)
context = mm->context;
context = mm->context.id;
i = batch->index;
/*
......
......@@ -344,7 +344,7 @@ xmon(struct pt_regs *excp)
#endif /* CONFIG_SMP */
set_msrd(msr); /* restore interrupt enable */
return 0;
return 1;
}
int
......
......@@ -1117,34 +1117,6 @@ COMPATIBLE_IOCTL(BNEPCONNADD)
COMPATIBLE_IOCTL(BNEPCONNDEL)
COMPATIBLE_IOCTL(BNEPGETCONNLIST)
COMPATIBLE_IOCTL(BNEPGETCONNINFO)
/* device-mapper */
#if defined(CONFIG_DM_IOCTL_V4)
COMPATIBLE_IOCTL(DM_VERSION)
COMPATIBLE_IOCTL(DM_REMOVE_ALL)
COMPATIBLE_IOCTL(DM_LIST_DEVICES)
COMPATIBLE_IOCTL(DM_DEV_CREATE)
COMPATIBLE_IOCTL(DM_DEV_REMOVE)
COMPATIBLE_IOCTL(DM_DEV_RENAME)
COMPATIBLE_IOCTL(DM_DEV_SUSPEND)
COMPATIBLE_IOCTL(DM_DEV_STATUS)
COMPATIBLE_IOCTL(DM_DEV_WAIT)
COMPATIBLE_IOCTL(DM_TABLE_LOAD)
COMPATIBLE_IOCTL(DM_TABLE_CLEAR)
COMPATIBLE_IOCTL(DM_TABLE_DEPS)
COMPATIBLE_IOCTL(DM_TABLE_STATUS)
#else
COMPATIBLE_IOCTL(DM_VERSION)
COMPATIBLE_IOCTL(DM_REMOVE_ALL)
COMPATIBLE_IOCTL(DM_DEV_CREATE)
COMPATIBLE_IOCTL(DM_DEV_REMOVE)
COMPATIBLE_IOCTL(DM_DEV_RELOAD)
COMPATIBLE_IOCTL(DM_DEV_SUSPEND)
COMPATIBLE_IOCTL(DM_DEV_RENAME)
COMPATIBLE_IOCTL(DM_DEV_DEPS)
COMPATIBLE_IOCTL(DM_DEV_STATUS)
COMPATIBLE_IOCTL(DM_TARGET_STATUS)
COMPATIBLE_IOCTL(DM_TARGET_WAIT)
#endif
/* And these ioctls need translation */
/* NCPFS */
HANDLE_IOCTL(NCP_IOC_NCPREQUEST_32, do_ncp_ncprequest)
......
......@@ -59,10 +59,11 @@ static void arc4_crypt(void *ctx_arg, u8 *out, const u8 *in)
u8 *const S = ctx->S;
u8 x = ctx->x;
u8 y = ctx->y;
u8 a, b;
u8 a = S[x];
a = S[x];
y = (y + a) & 0xff;
u8 b = S[y];
b = S[y];
S[x] = b;
S[y] = a;
x = (x + 1) & 0xff;
......
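The arc4 hunk above is purely about declaration placement: the kernel is compiled as ISO C90, which does not allow a declaration such as the old `u8 b = S[y];` to follow a statement, so both locals are now declared up front. The swap step itself is unchanged; restated as a standalone helper (hypothetical name, mirrors only the lines visible in the hunk):

static void rc4_swap_step(unsigned char *S, unsigned char *px, unsigned char *py)
{
	unsigned char x = *px, y = *py;
	unsigned char a, b;		/* declarations first (C90) */

	a = S[x];
	y = (y + a) & 0xff;
	b = S[y];			/* was a mid-block declaration */
	S[x] = b;
	S[y] = a;
	x = (x + 1) & 0xff;

	*px = x;
	*py = y;
}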
......@@ -321,6 +321,7 @@ config BLK_DEV_RAM_SIZE
config BLK_DEV_INITRD
bool "Initial RAM disk (initrd) support"
depends on BLK_DEV_RAM && BLK_DEV_RAM!=m
help
The initial RAM disk is a RAM disk that is loaded by the boot loader
(loadlin or lilo) and that is mounted as root before the normal boot
......
......@@ -4242,6 +4242,15 @@ int __init floppy_init(void)
disks[i] = alloc_disk(1);
if (!disks[i])
goto Enomem;
disks[i]->major = FLOPPY_MAJOR;
disks[i]->first_minor = TOMINOR(i);
disks[i]->fops = &floppy_fops;
sprintf(disks[i]->disk_name, "fd%d", i);
init_timer(&motor_off_timer[i]);
motor_off_timer[i].data = i;
motor_off_timer[i].function = motor_off_callback;
}
devfs_mk_dir ("floppy");
......@@ -4255,13 +4264,6 @@ int __init floppy_init(void)
goto fail_queue;
}
for (i=0; i<N_DRIVE; i++) {
disks[i]->major = FLOPPY_MAJOR;
disks[i]->first_minor = TOMINOR(i);
disks[i]->fops = &floppy_fops;
sprintf(disks[i]->disk_name, "fd%d", i);
}
blk_register_region(MKDEV(FLOPPY_MAJOR, 0), 256, THIS_MODULE,
floppy_find, NULL, NULL);
......@@ -4366,9 +4368,6 @@ int __init floppy_init(void)
}
for (drive = 0; drive < N_DRIVE; drive++) {
init_timer(&motor_off_timer[drive]);
motor_off_timer[drive].data = drive;
motor_off_timer[drive].function = motor_off_callback;
if (!(allowed_drive_mask & (1 << drive)))
continue;
if (fdc_state[FDC(drive)].version == FDC_NONE)
......
......@@ -1188,13 +1188,23 @@ static void blk_unplug_timeout(unsigned long data)
* Description:
* blk_start_queue() will clear the stop flag on the queue, and call
* the request_fn for the queue if it was in a stopped state when
* entered. Also see blk_stop_queue(). Must not be called from driver
* request function due to recursion issues. Queue lock must be held.
* entered. Also see blk_stop_queue(). Queue lock must be held.
**/
void blk_start_queue(request_queue_t *q)
{
clear_bit(QUEUE_FLAG_STOPPED, &q->queue_flags);
/*
* one level of recursion is ok and is much faster than kicking
* the unplug handling
*/
if (!test_and_set_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
q->request_fn(q);
clear_bit(QUEUE_FLAG_REENTER, &q->queue_flags);
} else {
blk_plug_device(q);
schedule_work(&q->unplug_work);
}
}
EXPORT_SYMBOL(blk_start_queue);
......@@ -1737,9 +1747,9 @@ void blk_insert_request(request_queue_t *q, struct request *rq,
/*
* If command is tagged, release the tag
*/
if(reinsert) {
if (reinsert)
blk_requeue_request(q, rq);
} else {
else {
int where = ELEVATOR_INSERT_BACK;
if (at_head)
......@@ -1751,6 +1761,9 @@ void blk_insert_request(request_queue_t *q, struct request *rq,
drive_stat_acct(rq, rq->nr_sectors, 1);
__elv_add_request(q, rq, where, 0);
}
if (blk_queue_plugged(q))
__generic_unplug_device(q);
else
q->request_fn(q);
spin_unlock_irqrestore(q->queue_lock, flags);
}
......
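Two behavioural notes fall out of the ll_rw_blk hunks above: blk_start_queue() may now call the driver's request_fn directly (one level of re-entry is tolerated via QUEUE_FLAG_REENTER, anything deeper falls back to plugging plus the unplug work), and blk_insert_request() kicks the queue itself after queuing. The caller contract in the comment stays the same: the queue lock must be held. A hedged sketch of a driver-side restart under that contract (hypothetical helper):

#include <linux/blkdev.h>

static void mydrv_restart_queue(request_queue_t *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_start_queue(q);	/* may invoke q->request_fn() inline now */
	spin_unlock_irqrestore(q->queue_lock, flags);
}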
......@@ -38,7 +38,6 @@
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
......@@ -77,8 +76,6 @@ static spinlock_t viodasd_spinlock = SPIN_LOCK_UNLOCKED;
#define DEVICE_NO(cell) ((struct viodasd_device *)(cell) - &viodasd_devices[0])
extern struct device *iSeries_vio_dev;
struct open_data {
u64 disk_size;
u16 max_disk;
......
......@@ -20,3 +20,4 @@ obj-$(CONFIG_OPTCD) += optcd.o
obj-$(CONFIG_SBPCD) += sbpcd.o cdrom.o
obj-$(CONFIG_SJCD) += sjcd.o
obj-$(CONFIG_CDU535) += sonycd535.o
obj-$(CONFIG_VIOCD) += viocd.o cdrom.o
......@@ -522,7 +522,7 @@ RIO_DEBUG_CTRL, if (su)
else {
rio_dprintk (RIO_DEBUG_CTRL, "p->RIOBindTab full! - Rta %x not added\n",
(int) arg);
return 1;
return -ENOMEM;
}
return 0;
}
......@@ -1593,12 +1593,12 @@ RIO_DEBUG_CTRL, if (su)
case RIO_NO_MESG:
if ( su )
p->RIONoMessage = 1;
return su ? 0 : EPERM;
return su ? 0 : -EPERM;
case RIO_MESG:
if ( su )
p->RIONoMessage = 0;
return su ? 0 : EPERM;
return su ? 0 : -EPERM;
case RIO_WHAT_MESG:
if ( copyout( (caddr_t)&p->RIONoMessage, (int)arg,
......
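The rio_linux changes above follow the standard convention that an ioctl handler reports failure as a negative errno; a bare positive EPERM (or a 1) propagates back to userspace as a successful return value. A short hedged illustration of the pattern, not taken from this driver:

if (!capable(CAP_SYS_ADMIN))
	return -EPERM;		/* not `return EPERM;' */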
......@@ -152,7 +152,7 @@ static int __init i2c_bitelv_init(void)
return -ENODEV;
}
}
pr_debug("i2c-elv: found device at %#x.\n",base);
pr_debug("i2c-elv: found device at %#lx.\n",base);
return 0;
}
......
......@@ -138,7 +138,7 @@ static int __init i2c_bitvelle_init(void)
return -ENODEV;
}
}
pr_debug("i2c-velleman: found device at %#x.\n",base);
pr_debug("i2c-velleman: found device at %#lx.\n",base);
return 0;
}
......
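Both i2c hunks above fix the same mismatch: `base` is an unsigned long, so the format specifier has to be %#lx rather than %#x; the tulip hunk further below makes the analogous %08llx fix for a value cast to unsigned long long. Minimal illustration with a made-up value:

unsigned long base = 0x330;	/* example address, not from the patch */
pr_debug("i2c-elv: found device at %#lx.\n", base);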
......@@ -699,8 +699,8 @@ config BLK_DEV_PDC202XX_OLD
config PDC202XX_BURST
bool "Special UDMA Feature"
depends on BLK_DEV_PDC202XX_OLD=y
---help---
depends on BLK_DEV_PDC202XX_OLD
help
This option causes the pdc202xx driver to enable UDMA modes on the
PDC202xx even when the PDC202xx BIOS has not done so.
......@@ -720,7 +720,7 @@ config BLK_DEV_PDC202XX_NEW
# FIXME - probably wants to be one for old and for new
config PDC202XX_FORCE
bool "Enable controller even if disabled by BIOS"
depends on BLK_DEV_PDC202XX_NEW=y
depends on BLK_DEV_PDC202XX_NEW
help
Enable the PDC202xx controller even if it has been disabled in the BIOS setup.
......
......@@ -162,14 +162,6 @@ config BLK_DEV_DM
If unsure, say N.
config DM_IOCTL_V4
bool "ioctl interface version 4"
depends on BLK_DEV_DM
default y
---help---
Recent tools use a new version of the ioctl interface, only
select this option if you intend using such tools.
config DM_CRYPT
tristate "Crypt target support"
depends on BLK_DEV_DM && EXPERIMENTAL
......
......@@ -503,10 +503,11 @@ v4l_compat_translate_ioctl(struct inode *inode,
int *on = arg;
if (0 == *on) {
enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
/* dirty hack time. But v4l1 has no STREAMOFF
* equivalent in the API, and this one at
* least comes close ... */
drv(inode, file, VIDIOC_STREAMOFF, NULL);
drv(inode, file, VIDIOC_STREAMOFF, &type);
}
err = drv(inode, file, VIDIOC_OVERLAY, arg);
if (err < 0)
......@@ -857,6 +858,7 @@ v4l_compat_translate_ioctl(struct inode *inode,
case VIDIOCMCAPTURE: /* capture a frame */
{
struct video_mmap *mm = arg;
enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
fmt2 = kmalloc(sizeof(*fmt2),GFP_KERNEL);
memset(&buf2,0,sizeof(buf2));
......@@ -897,7 +899,7 @@ v4l_compat_translate_ioctl(struct inode *inode,
dprintk("VIDIOCMCAPTURE / VIDIOC_QBUF: %d\n",err);
break;
}
err = drv(inode, file, VIDIOC_STREAMON, NULL);
err = drv(inode, file, VIDIOC_STREAMON, &type);
if (err < 0)
dprintk("VIDIOCMCAPTURE / VIDIOC_STREAMON: %d\n",err);
break;
......
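The v4l1 compatibility fixes above stop passing NULL as the ioctl argument: VIDIOC_STREAMON and VIDIOC_STREAMOFF take a pointer to the buffer type and dereference it, so a V4L2_BUF_TYPE_VIDEO_CAPTURE variable is passed instead. Restating the call shape from the hunk:

enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;

err = drv(inode, file, VIDIOC_STREAMON, &type);	/* was NULL */
if (err < 0)
	dprintk("VIDIOC_STREAMON: %d\n", err);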
......@@ -39,7 +39,10 @@ config I2O_BLOCK
depends on I2O
help
Include support for the I2O Block OSM. The Block OSM presents disk
and other structured block devices to the operating system.
and other structured block devices to the operating system. If you
are using a RAID controller, the array can only be accessed through
the Block OSM driver, but the individual disks can still be reached
through the SCSI OSM driver, for example to monitor them.
To compile this support as a module, choose M here: the
module will be called i2o_block.
......@@ -50,7 +53,8 @@ config I2O_SCSI
help
Allows direct SCSI access to SCSI devices on a SCSI or FibreChannel
I2O controller. You can use both the SCSI and Block OSM together if
you wish.
you wish. To access a RAID array you must use the Block OSM driver,
but the SCSI OSM driver can still be used to monitor the individual disks.
To compile this support as a module, choose M here: the
module will be called i2o_scsi.
......
......@@ -1460,3 +1460,5 @@ static void __exit cfi_staa_exit(void)
module_init(cfi_staa_init);
module_exit(cfi_staa_exit);
MODULE_LICENSE("GPL");
......@@ -93,3 +93,4 @@ void simple_map_init(struct map_info *map)
}
EXPORT_SYMBOL(simple_map_init);
MODULE_LICENSE("GPL");
......@@ -211,7 +211,7 @@ int tulip_poll(struct net_device *dev, int *budget)
if (tp->rx_buffers[entry].mapping !=
le32_to_cpu(tp->rx_ring[entry].buffer1)) {
printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
"do not match in tulip_rx: %08x vs. %llx %p / %p.\n",
"do not match in tulip_rx: %08x vs. %08llx %p / %p.\n",
dev->name,
le32_to_cpu(tp->rx_ring[entry].buffer1),
(unsigned long long)tp->rx_buffers[entry].mapping,
......
......@@ -24,6 +24,7 @@
#include <linux/smp_lock.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
......