Commit 209174ea authored by David S. Miller's avatar David S. Miller

Merge nuts.davemloft.net:/disk1/BK/sparcwork-2.6

into nuts.davemloft.net:/disk1/BK/sparc-2.6
parents 3c68d090 6aaac55f
......@@ -190,7 +190,7 @@ We'll say that the horizontal scanrate is about 31 kHz:
1/(32.141E-6 s) = 31.113E3 Hz
A full screen counts 480 (yres) lines, but we have to consider the vertical
retrace too (e.g. 49 `pixels'). So a full screen will take
retrace too (e.g. 49 `lines'). So a full screen will take
(480+49)*32.141E-6 s = 17.002E-3 s
......
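A quick cross-check of the timing figures quoted above: a 32.141 µs line period with 480 visible lines plus 49 lines of vertical retrace gives roughly a 31.1 kHz horizontal rate, a 17.0 ms frame time and hence about a 58.8 Hz vertical refresh. A minimal user-space sketch of that arithmetic, using only the numbers from the text:

#include <stdio.h>

int main(void)
{
	double line_period = 32.141e-6;                 /* seconds per scanline */
	double hfreq = 1.0 / line_period;               /* ~31.113 kHz horizontal rate */
	double frame_time = (480 + 49) * line_period;   /* visible lines + retrace */
	double vfreq = 1.0 / frame_time;                /* ~58.8 Hz vertical refresh */

	printf("hfreq = %.3f kHz\n", hfreq / 1e3);
	printf("frame = %.3f ms, vfreq = %.1f Hz\n", frame_time * 1e3, vfreq);
	return 0;
}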
......@@ -17,6 +17,7 @@
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/fixmap.h>
#include "io_ports.h"
extern spinlock_t i8253_lock;
......@@ -62,6 +63,17 @@ static void mark_offset_cyclone(void)
count = inb_p(0x40); /* read the latched count */
count |= inb(0x40) << 8;
/*
* VIA686a test code... reset the latch if count > max + 1
* from timer_pit.c - cjb
*/
if (count > LATCH) {
outb_p(0x34, PIT_MODE);
outb_p(LATCH & 0xff, PIT_CH0);
outb(LATCH >> 8, PIT_CH0);
count = LATCH - 1;
}
spin_unlock(&i8253_lock);
/* lost tick compensation */
......
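For context, LATCH here is the PIT reload value, which the kernel derives from the 8254 input clock and the tick rate; a correctly latched count can never exceed it, so a larger reading means the VIA686a returned garbage and the channel must be reprogrammed, exactly as the hunk above does. A small sketch of that arithmetic, assuming the common i386 values CLOCK_TICK_RATE = 1193182 Hz and HZ = 1000:

#include <stdio.h>

#define CLOCK_TICK_RATE 1193182                     /* 8254 input clock in Hz (assumed) */
#define HZ              1000                        /* timer tick rate (assumed) */
#define LATCH ((CLOCK_TICK_RATE + HZ / 2) / HZ)     /* rounded reload value, as in the kernel */

int main(void)
{
	/* A healthy latched count lies in [0, LATCH); with these values
	 * LATCH is 1193, so anything above that triggers the reset above. */
	printf("LATCH = %d\n", LATCH);
	return 0;
}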
......@@ -134,7 +134,6 @@ parse_unisys_oem (char *oemptr, int oem_entries)
}
if (success < 2) {
printk("\nNo ES7000 found.\n");
es7000_plat = 0;
} else {
printk("\nEnabling ES7000 specific features...\n");
......
......@@ -86,7 +86,7 @@ include/asm-$(ARCH)/offsets.h: arch/$(ARCH)/kernel/asm-offsets.s
ifdef CONFIG_6xx
# Ensure this is binutils 2.12.1 (or 2.12.90.0.7) or later
NEW_AS := $(shell echo dssall | $(AS) -o /dev/null >/dev/null 2>&1 ; echo $$?)
NEW_AS := $(shell echo dssall | $(AS) -many -o /dev/null >/dev/null 2>&1 ; echo $$?)
GOODVER := 2.12.1
else
NEW_AS := 0
......@@ -94,7 +94,7 @@ endif
ifneq ($(NEW_AS),0)
checkbin:
@echo -n '*** ${VERSION}.${PATCHLEVEL} kernels no longer build'
@echo -n '*** ${VERSION}.${PATCHLEVEL} kernels no longer build '
@echo 'correctly with old versions of binutils.'
@echo '*** Please upgrade your binutils to ${GOODVER} or newer'
@false
......
......@@ -32,7 +32,9 @@
extern struct subsystem devices_subsys; /* needed for vio_find_name() */
struct iommu_table *vio_build_iommu_table(struct vio_dev *dev);
static struct iommu_table *vio_build_iommu_table(struct vio_dev *);
static const struct vio_device_id *vio_match_device(
const struct vio_device_id *, const struct vio_dev *);
#ifdef CONFIG_PPC_PSERIES
static int vio_num_address_cells;
......@@ -136,15 +138,15 @@ EXPORT_SYMBOL(vio_unregister_driver);
* system is in its list of supported devices. Returns the matching
* vio_device_id structure or NULL if there is no match.
*/
const struct vio_device_id * vio_match_device(const struct vio_device_id *ids,
static const struct vio_device_id * vio_match_device(const struct vio_device_id *ids,
const struct vio_dev *dev)
{
DBGENTER();
#ifdef CONFIG_PPC_PSERIES
while (ids->type) {
if ((strncmp(dev->archdata->type, ids->type, strlen(ids->type)) == 0) &&
device_is_compatible((struct device_node*)dev->archdata, ids->compat))
if ((strncmp(((struct device_node *)dev->dev.platform_data)->type, ids->type, strlen(ids->type)) == 0) &&
device_is_compatible(dev->dev.platform_data, ids->compat))
return ids;
ids++;
}
......@@ -263,14 +265,13 @@ static void __devinit vio_dev_release(struct device *dev)
DBGENTER();
/* XXX free TCE table */
of_node_put(viodev->archdata);
of_node_put(viodev->dev.platform_data);
kfree(viodev);
}
static ssize_t viodev_show_devspec(struct device *dev, char *buf)
{
struct vio_dev *viodev = to_vio_dev(dev);
struct device_node *of_node = viodev->archdata;
struct device_node *of_node = dev->platform_data;
return sprintf(buf, "%s\n", of_node->full_name);
}
......@@ -278,8 +279,7 @@ DEVICE_ATTR(devspec, S_IRUSR | S_IRGRP | S_IROTH, viodev_show_devspec, NULL);
static ssize_t viodev_show_name(struct device *dev, char *buf)
{
struct vio_dev *viodev = to_vio_dev(dev);
struct device_node *of_node = viodev->archdata;
struct device_node *of_node = dev->platform_data;
return sprintf(buf, "%s\n", of_node->name);
}
......@@ -290,7 +290,7 @@ DEVICE_ATTR(name, S_IRUSR | S_IRGRP | S_IROTH, viodev_show_name, NULL);
* @of_node: The OF node for this device.
*
* Creates and initializes a vio_dev structure from the data in
* of_node (archdata) and adds it to the list of virtual devices.
* of_node (dev.platform_data) and adds it to the list of virtual devices.
* Returns a pointer to the created vio_dev or NULL if node has
* NULL device_type or compatible fields.
*/
......@@ -324,7 +324,7 @@ struct vio_dev * __devinit vio_register_device(struct device_node *of_node)
}
memset(viodev, 0, sizeof(struct vio_dev));
viodev->archdata = (void *)of_node_get(of_node);
viodev->dev.platform_data = of_node_get(of_node);
viodev->unit_address = *unit_address;
viodev->iommu_table = vio_build_iommu_table(viodev);
......@@ -380,7 +380,7 @@ EXPORT_SYMBOL(vio_unregister_device);
*/
const void * vio_get_attribute(struct vio_dev *vdev, void* which, int* length)
{
return get_property((struct device_node *)vdev->archdata, (char*)which, length);
return get_property(vdev->dev.platform_data, (char*)which, length);
}
EXPORT_SYMBOL(vio_get_attribute);
......@@ -427,7 +427,7 @@ EXPORT_SYMBOL(vio_find_node);
* Returns a pointer to the built tce tree, or NULL if it can't
* find property.
*/
struct iommu_table * vio_build_iommu_table(struct vio_dev *dev)
static struct iommu_table * vio_build_iommu_table(struct vio_dev *dev)
{
unsigned int *dma_window;
struct iommu_table *newTceTable;
......@@ -435,7 +435,7 @@ struct iommu_table * vio_build_iommu_table(struct vio_dev *dev)
unsigned long size;
int dma_window_property_size;
dma_window = (unsigned int *) get_property((struct device_node *)dev->archdata, "ibm,my-dma-window", &dma_window_property_size);
dma_window = (unsigned int *) get_property(dev->dev.platform_data, "ibm,my-dma-window", &dma_window_property_size);
if(!dma_window) {
return NULL;
}
......
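The net effect of the vio.c hunks above is that the Open Firmware node now hangs off the generic struct device instead of a driver-private archdata pointer. A minimal sketch of the new access pattern (the helper name is hypothetical; the field accesses mirror the hunks above):

/* Hypothetical helper: reach the OF node through dev.platform_data,
 * as viodev_show_devspec() and vio_get_attribute() now do above. */
static const char *vio_example_devspec(struct vio_dev *viodev)
{
	struct device_node *of_node = viodev->dev.platform_data;

	return of_node ? of_node->full_name : "<none>";
}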
......@@ -32,6 +32,7 @@ int main(void)
DEFINE(__TI_cpu, offsetof(struct thread_info, cpu),);
DEFINE(__TI_precount, offsetof(struct thread_info, preempt_count),);
BLANK();
DEFINE(__PT_ARGS, offsetof(struct pt_regs, args),);
DEFINE(__PT_PSW, offsetof(struct pt_regs, psw),);
DEFINE(__PT_GPRS, offsetof(struct pt_regs, gprs),);
DEFINE(__PT_ORIG_GPR2, offsetof(struct pt_regs, orig_gpr2),);
......
......@@ -1097,6 +1097,8 @@ compat_sys_futex_wrapper:
lgfr %r4,%r4 # int
llgtr %r5,%r5 # struct compat_timespec *
llgtr %r6,%r6 # u32 *
lgf %r0,164(%r15) # int
stg %r0,160(%r15)
jg compat_sys_futex # branch to system call
.globl sys32_setxattr_wrapper
......
......@@ -24,7 +24,8 @@
* Stack layout for the system_call stack entry.
* The first few entries are identical to the user_regs_struct.
*/
SP_PTREGS = STACK_FRAME_OVERHEAD
SP_PTREGS = STACK_FRAME_OVERHEAD
SP_ARGS = STACK_FRAME_OVERHEAD + __PT_ARGS
SP_PSW = STACK_FRAME_OVERHEAD + __PT_PSW
SP_R0 = STACK_FRAME_OVERHEAD + __PT_GPRS
SP_R1 = STACK_FRAME_OVERHEAD + __PT_GPRS + 4
......@@ -230,12 +231,14 @@ system_call:
sysc_enter:
GET_THREAD_INFO # load pointer to task_struct to R9
sla %r7,2 # *4 and test for svc 0
bnz BASED(sysc_do_restart) # svc number > 0
bnz BASED(sysc_nr_ok) # svc number > 0
# svc 0: system call number in %r1
cl %r1,BASED(.Lnr_syscalls)
bnl BASED(sysc_do_restart)
bnl BASED(sysc_nr_ok)
lr %r7,%r1 # copy svc number to %r7
sla %r7,2 # *4
sysc_nr_ok:
mvc SP_ARGS(4,%r15),SP_R7(%r15)
sysc_do_restart:
tm __TI_flags+3(%r9),(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT)
l %r8,sys_call_table-system_call(%r7,%r13) # get system call addr.
......@@ -510,6 +513,7 @@ pgm_svcper:
lr %r7,%r1 # copy svc number to %r7
sla %r7,2 # *4
pgm_svcstd:
mvc SP_ARGS(4,%r15),SP_R7(%r15)
tm __TI_flags+3(%r9),(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT)
l %r8,sys_call_table-system_call(%r7,%r13) # get system call addr.
bnz BASED(pgm_tracesys)
......
......@@ -24,7 +24,8 @@
* Stack layout for the system_call stack entry.
* The first few entries are identical to the user_regs_struct.
*/
SP_PTREGS = STACK_FRAME_OVERHEAD
SP_PTREGS = STACK_FRAME_OVERHEAD
SP_ARGS = STACK_FRAME_OVERHEAD + __PT_ARGS
SP_PSW = STACK_FRAME_OVERHEAD + __PT_PSW
SP_R0 = STACK_FRAME_OVERHEAD + __PT_GPRS
SP_R1 = STACK_FRAME_OVERHEAD + __PT_GPRS + 8
......@@ -214,13 +215,15 @@ system_call:
sysc_enter:
GET_THREAD_INFO # load pointer to task_struct to R9
slag %r7,%r7,2 # *4 and test for svc 0
jnz sysc_do_restart
jnz sysc_nr_ok
# svc 0: system call number in %r1
lghi %r0,NR_syscalls
clr %r1,%r0
jnl sysc_do_restart
jnl sysc_nr_ok
lgfr %r7,%r1 # clear high word in r1
slag %r7,%r7,2 # svc 0: system call number in %r1
sysc_nr_ok:
mvc SP_ARGS(8,%r15),SP_R7(%r15)
sysc_do_restart:
larl %r10,sys_call_table
#ifdef CONFIG_S390_SUPPORT
......@@ -542,6 +545,7 @@ pgm_svcper:
clr %r1,%r0
slag %r7,%r1,2
pgm_svcstd:
mvc SP_ARGS(8,%r15),SP_R7(%r15)
larl %r10,sys_call_table
#ifdef CONFIG_S390_SUPPORT
tm SP_PSW+3(%r15),0x01 # are we running in 31 bit mode ?
......
......@@ -141,7 +141,7 @@ peek_user(struct task_struct *child, addr_t addr, addr_t data)
/*
* psw and gprs are stored on the stack
*/
tmp = *(addr_t *)((addr_t) __KSTK_PTREGS(child) + addr);
tmp = *(addr_t *)((addr_t) &__KSTK_PTREGS(child)->psw + addr);
if (addr == (addr_t) &dummy->regs.psw.mask)
/* Remove per bit from user psw. */
tmp &= ~PSW_MASK_PER;
......@@ -215,7 +215,7 @@ poke_user(struct task_struct *child, addr_t addr, addr_t data)
high order bit but older gdb's rely on it */
data |= PSW_ADDR_AMODE;
#endif
*(addr_t *)((addr_t) __KSTK_PTREGS(child) + addr) = data;
*(addr_t *)((addr_t) &__KSTK_PTREGS(child)->psw + addr) = data;
} else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) {
/*
......@@ -360,7 +360,7 @@ peek_user_emu31(struct task_struct *child, addr_t addr, addr_t data)
PSW32_ADDR_AMODE31;
} else {
/* gpr 0-15 */
tmp = *(__u32 *)((addr_t) __KSTK_PTREGS(child) +
tmp = *(__u32 *)((addr_t) &__KSTK_PTREGS(child)->psw +
addr*2 + 4);
}
} else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) {
......@@ -439,8 +439,8 @@ poke_user_emu31(struct task_struct *child, addr_t addr, addr_t data)
(__u64) tmp & PSW32_ADDR_INSN;
} else {
/* gpr 0-15 */
*(__u32*)((addr_t) __KSTK_PTREGS(child) + addr*2 + 4) =
tmp;
*(__u32*)((addr_t) &__KSTK_PTREGS(child)->psw
+ addr*2 + 4) = tmp;
}
} else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) {
/*
......
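The reason these ptrace hunks switch the base address from __KSTK_PTREGS(child) to &__KSTK_PTREGS(child)->psw is the new args[] word prepended to struct pt_regs elsewhere in this diff: the user-visible register layout still begins at the PSW, so peek/poke offsets must be taken relative to the psw member or every register would be skewed by sizeof(args). A small sketch of that offset relationship, using simplified stand-in structures rather than the real s390 definitions:

#include <stdio.h>
#include <stddef.h>

/* Simplified stand-ins, not the real s390 layout. */
struct fake_pt_regs {
	unsigned long args[1];   /* newly prepended in this diff */
	unsigned long psw[2];    /* psw.mask, psw.addr */
	unsigned long gprs[16];
};

int main(void)
{
	/* Offsets from user_regs_struct start at the PSW, so ptrace must add
	 * 'addr' to &regs->psw rather than to regs itself. */
	printf("skew without the fix: %zu bytes\n",
	       offsetof(struct fake_pt_regs, psw));
	return 0;
}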
......@@ -2215,7 +2215,7 @@ static int __init amb_init (amb_dev * dev)
} /* amb_reset */
return -1;
return -EINVAL;
}
static void setup_dev(amb_dev *dev, struct pci_dev *pci_dev)
......@@ -2257,27 +2257,31 @@ static void setup_dev(amb_dev *dev, struct pci_dev *pci_dev)
spin_lock_init (&dev->rxq[pool].lock);
}
static void setup_pci_dev(struct pci_dev *pci_dev)
static int setup_pci_dev(struct pci_dev *pci_dev)
{
unsigned char lat;
unsigned char lat;
int ret;
/* XXX check return value */
pci_enable_device(pci_dev);
// enable bus master accesses
pci_set_master(pci_dev);
// enable bus master accesses
pci_set_master(pci_dev);
// frobnicate latency (upwards, usually)
pci_read_config_byte (pci_dev, PCI_LATENCY_TIMER, &lat);
if (pci_lat) {
PRINTD (DBG_INIT, "%s PCI latency timer from %hu to %hu",
"changing", lat, pci_lat);
pci_write_config_byte (pci_dev, PCI_LATENCY_TIMER, pci_lat);
} else if (lat < MIN_PCI_LATENCY) {
PRINTK (KERN_INFO, "%s PCI latency timer from %hu to %hu",
"increasing", lat, MIN_PCI_LATENCY);
pci_write_config_byte (pci_dev, PCI_LATENCY_TIMER, MIN_PCI_LATENCY);
}
ret = pci_enable_device(pci_dev);
if (ret < 0)
goto out;
// frobnicate latency (upwards, usually)
pci_read_config_byte (pci_dev, PCI_LATENCY_TIMER, &lat);
if (!pci_lat)
pci_lat = (lat < MIN_PCI_LATENCY) ? MIN_PCI_LATENCY : lat;
if (lat != pci_lat) {
PRINTK (KERN_INFO, "Changing PCI latency timer from %hu to %hu",
lat, pci_lat);
pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, pci_lat);
}
out:
return ret;
}
static int __init do_pci_device(struct pci_dev *pci_dev)
......@@ -2294,40 +2298,43 @@ static int __init do_pci_device(struct pci_dev *pci_dev)
" IO %x, IRQ %u, MEM %p", iobase, irq, membase);
// check IO region
if (!request_region (iobase, AMB_EXTENT, DEV_LABEL)) {
err = pci_request_region(pci_dev, 1, DEV_LABEL);
if (err < 0) {
PRINTK (KERN_ERR, "IO range already in use!");
return -EBUSY;
goto out;
}
dev = kmalloc (sizeof(amb_dev), GFP_KERNEL);
if (!dev) {
PRINTK (KERN_ERR, "out of memory!");
err = -ENOMEM;
goto out;
goto out_release;
}
setup_dev(dev, pci_dev);
if (amb_init (dev)) {
err = amb_init(dev);
if (err < 0) {
PRINTK (KERN_ERR, "adapter initialisation failure");
err = -EINVAL;
goto out1;
goto out_free;
}
setup_pci_dev(pci_dev);
err = setup_pci_dev(pci_dev);
if (err < 0)
goto out_reset;
// grab (but share) IRQ and install handler
if (request_irq (irq, interrupt_handler, SA_SHIRQ, DEV_LABEL, dev)) {
err = request_irq(irq, interrupt_handler, SA_SHIRQ, DEV_LABEL, dev);
if (err < 0) {
PRINTK (KERN_ERR, "request IRQ failed!");
err = -EBUSY;
goto out2;
goto out_disable;
}
dev->atm_dev = atm_dev_register (DEV_LABEL, &amb_ops, -1, NULL);
if (!dev->atm_dev) {
PRINTD (DBG_ERR, "failed to register Madge ATM adapter");
err = -EINVAL;
goto out3;
goto out_free_irq;
}
PRINTD (DBG_INFO, "registered Madge ATM adapter (no. %d) (%p) at %p",
......@@ -2348,17 +2355,20 @@ static int __init do_pci_device(struct pci_dev *pci_dev)
// enable host interrupts
interrupts_on (dev);
return 0;
out3:
free_irq (irq, dev);
out2:
amb_reset (dev, 0);
out1:
kfree (dev);
out:
release_region (iobase, AMB_EXTENT);
return err;
out_free_irq:
free_irq(irq, dev);
out_disable:
pci_disable_device(pci_dev);
out_reset:
amb_reset(dev, 0);
out_free:
kfree(dev);
out_release:
pci_release_region(pci_dev, 1);
goto out;
}
static int __init amb_probe (void) {
......@@ -2488,7 +2498,10 @@ static void __exit amb_module_exit (void) {
del_timer_sync(&housekeeping);
while (amb_devs) {
struct pci_dev *pdev;
dev = amb_devs;
pdev = dev->pci_dev;
amb_devs = dev->prev;
PRINTD (DBG_INFO|DBG_INIT, "closing %p (atm_dev = %p)", dev, dev->atm_dev);
......@@ -2496,11 +2509,12 @@ static void __exit amb_module_exit (void) {
drain_rx_pools (dev);
interrupts_off (dev);
amb_reset (dev, 0);
free_irq (dev->irq, dev);
pci_disable_device (pdev);
destroy_queues (dev);
atm_dev_deregister (dev->atm_dev);
free_irq (dev->irq, dev);
release_region (dev->iobase, AMB_EXTENT);
kfree (dev);
pci_release_region (pdev, 1);
}
return;
......
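The reworked probe path above follows the usual reverse-order unwind idiom: each failing step jumps to a label that undoes only what has already succeeded, and the exit labels run in the opposite order from setup. A stripped-down sketch of the same idiom, with hypothetical stand-ins for pci_request_region(), amb_init(), request_irq() and their undo calls:

/* Hypothetical helpers; the point is the reverse-order unwind. */
static int grab_region(void)     { return 0; }
static int init_hardware(void)   { return 0; }
static int grab_irq(void)        { return 0; }
static void reset_hardware(void) { }
static void drop_region(void)    { }

static int example_probe(void)
{
	int err;

	err = grab_region();
	if (err < 0)
		goto out;

	err = init_hardware();
	if (err < 0)
		goto out_release;

	err = grab_irq();
	if (err < 0)
		goto out_reset;

	return 0;                /* fully set up */

out_reset:
	reset_hardware();        /* undo init_hardware() */
out_release:
	drop_region();           /* undo grab_region()   */
out:
	return err;
}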
......@@ -31,6 +31,12 @@
#define j44(a,b) (((a>>4)&0x0f)+(b&0xf0))
#define j53(a,b) (((a>>3)&0x1f)+((b<<4)&0xe0))
static int epatc8;
module_param(epatc8, int, 0);
MODULE_PARM_DESC(epatc8, "support for the Shuttle EP1284 chip, "
"used in any recent Imation SuperDisk (LS-120) drive.");
/* cont = 0 IDE register file
cont = 1 IDE control registers
cont = 2 internal EPAT registers
......@@ -209,15 +215,18 @@ static void epat_connect ( PIA *pi )
{ pi->saved_r0 = r0();
pi->saved_r2 = r2();
#ifdef CONFIG_PARIDE_EPATC8
/* Initialize the chip */
CPP(0);CPP(0x40);CPP(0xe0);
w0(0);w2(1);w2(4);
WR(0x8,0x12);WR(0xc,0x14);WR(0x12,0x10);
WR(0xe,0xf);WR(0xf,4);
/* WR(0xe,0xa);WR(0xf,4); */
WR(0xe,0xd);WR(0xf,0);
/* CPP(0x30); */
CPP(0);
if (epatc8) {
CPP(0x40);CPP(0xe0);
w0(0);w2(1);w2(4);
WR(0x8,0x12);WR(0xc,0x14);WR(0x12,0x10);
WR(0xe,0xf);WR(0xf,4);
/* WR(0xe,0xa);WR(0xf,4); */
WR(0xe,0xd);WR(0xf,0);
/* CPP(0x30); */
}
/* Connect to the chip */
CPP(0xe0);
......@@ -227,15 +236,10 @@ static void epat_connect ( PIA *pi )
/* Request EPP */
w0(0x40);w2(6);w2(7);w2(4);w2(0xc);w2(4);
}
#else
CPP(0); CPP(0xe0);
w0(0); w2(1); w2(4);
if (pi->mode >= 3) {
w0(0); w2(1); w2(4); w2(0xc);
w0(0x40); w2(6); w2(7); w2(4); w2(0xc); w2(4);
if (!epatc8) {
WR(8,0x10); WR(0xc,0x14); WR(0xa,0x38); WR(0x12,0x10);
}
WR(8,0x10); WR(0xc,0x14); WR(0xa,0x38); WR(0x12,0x10);
#endif
}
static void epat_disconnect (PIA *pi)
......@@ -320,6 +324,9 @@ static struct pi_protocol epat = {
static int __init epat_init(void)
{
#ifdef CONFIG_PARIDE_EPATC8
epatc8 = 1;
#endif
return pi_register(&epat)-1;
}
......
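With this change the EPAT C8 support becomes a runtime choice: loading the driver with epatc8=1 enables the Shuttle EP1284 initialisation, and building with CONFIG_PARIDE_EPATC8 merely flips the default on in epat_init(). A minimal sketch of the same module-parameter idiom, using a hypothetical demo module rather than the real driver:

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>

/* Hypothetical demo module showing the module_param() idiom used above. */
static int epatc8_demo;
module_param(epatc8_demo, int, 0);
MODULE_PARM_DESC(epatc8_demo, "enable EP1284-style chip init (demo parameter)");

static int __init demo_init(void)
{
	printk(KERN_INFO "demo: epatc8_demo=%d\n", epatc8_demo);
	return 0;
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");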
......@@ -671,28 +671,24 @@ int cdrom_has_defect_mgt(struct cdrom_device_info *cdi)
{
struct packet_command cgc;
char buffer[16];
struct feature_header *fh;
__u16 *feature_code;
int ret;
init_cdrom_command(&cgc, buffer, sizeof(buffer), CGC_DATA_READ);
cgc.cmd[0] = GPCMD_GET_CONFIGURATION; /* often 0x46 */
cgc.cmd[3] = CDF_HWDM; /* often 0x0024 */
cgc.cmd[8] = sizeof(buffer); /* often 0x10 */
cgc.cmd[0] = GPCMD_GET_CONFIGURATION;
cgc.cmd[3] = CDF_HWDM;
cgc.cmd[8] = sizeof(buffer);
cgc.quiet = 1;
if ((ret = cdi->ops->generic_packet(cdi, &cgc)))
return ret;
fh = (struct feature_header *)&buffer[0];
ret = 1;
if (be32_to_cpu(fh->data_len) >= (sizeof(struct feature_header)+8)) {
feature_code = (__u16 *)&buffer[sizeof(struct feature_header)];
if (CDF_HWDM == be16_to_cpu(*feature_code))
ret = 0;
}
return ret;
feature_code = (__u16 *) &buffer[sizeof(struct feature_header)];
if (be16_to_cpu(*feature_code) == CDF_HWDM)
return 0;
return 1;
}
......
......@@ -1157,7 +1157,7 @@ static void handle_stripe(struct stripe_head *sh)
* parity, or to satisfy requests
* or to load a block that is being partially written.
*/
if (to_read || non_overwrite || (syncing && (uptodate+failed < disks))) {
if (to_read || non_overwrite || (syncing && (uptodate < disks))) {
for (i=disks; i--;) {
dev = &sh->dev[i];
if (!test_bit(R5_LOCKED, &dev->flags) && !test_bit(R5_UPTODATE, &dev->flags) &&
......
......@@ -901,7 +901,7 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_
adapter = netdev->priv;
memset(adapter, 0, sizeof(adapter));
dev->driver_data = netdev;
dev->dev.driver_data = netdev;
adapter->vdev = dev;
adapter->netdev = netdev;
......@@ -971,7 +971,7 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_
static int __devexit ibmveth_remove(struct vio_dev *dev)
{
struct net_device *netdev = dev->driver_data;
struct net_device *netdev = dev->dev.driver_data;
struct ibmveth_adapter *adapter = netdev->priv;
unregister_netdev(netdev);
......
......@@ -494,9 +494,15 @@ config SCSI_OMIT_FLASHPOINT
substantial, so users of MultiMaster Host Adapters may wish to omit
it.
#
# This is marked broken because it uses over 4kB of stack in
# just two routines:
# 2076 CpqTsProcessIMQEntry
# 2052 PeekIMQEntry
#
config SCSI_CPQFCTS
tristate "Compaq Fibre Channel 64-bit/66Mhz HBA support"
depends on PCI && SCSI
depends on PCI && SCSI && BROKEN
help
Say Y here to compile in support for the Compaq StorageWorks Fibre
Channel 64-bit/66Mhz Host Bus Adapter.
......
......@@ -607,6 +607,7 @@ static int PeekIMQEntry( PTACHYON fcChip, ULONG type)
if( (fcChip->IMQ->QEntry[CI].type & 0x1FF) == 0x104 )
{
TachFCHDR_GCMND* fchs;
#error This is too much stack
ULONG ulFibreFrame[2048/4]; // max DWORDS in incoming FC Frame
USHORT SFQpi = (USHORT)(fcChip->IMQ->QEntry[CI].word[0] & 0x0fffL);
......@@ -718,6 +719,7 @@ int CpqTsProcessIMQEntry(void *host)
ULONG x_ID;
ULONG ulBuff, dwStatus;
TachFCHDR_GCMND* fchs;
#error This is too much stack
ULONG ulFibreFrame[2048/4]; // max number of DWORDS in incoming Fibre Frame
UCHAR ucInboundMessageType; // Inbound CM, dword 3 "type" field
......
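The stack figures in the Kconfig comment are easy to verify: ulFibreFrame[2048/4] is 512 four-byte words, i.e. 2048 bytes of automatic storage in each routine before any other locals, which is why the two frames come out at roughly 2052 and 2076 bytes and why the driver is now marked BROKEN on a shared 4/8 KB kernel stack. A tiny sketch of the arithmetic, assuming ULONG is a 32-bit type as on i386:

#include <stdio.h>

typedef unsigned int ULONG;        /* assumed 32-bit, matching the driver's use on i386 */

int main(void)
{
	ULONG ulFibreFrame[2048 / 4];  /* the on-stack frame buffer from the driver */

	/* 512 elements * 4 bytes = 2048 bytes per routine, before any
	 * other locals -- hence the ~2052/2076 byte frames flagged above. */
	printf("%zu bytes on the stack\n", sizeof(ulFibreFrame));
	return 0;
}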
......@@ -1372,8 +1372,6 @@ int __init vga16fb_init(void)
vga16fb.par = &vga16_par;
vga16fb.flags = FBINFO_FLAG_DEFAULT;
vga16fb.fix.smem_start = VGA_MAP_MEM(vga16fb.fix.smem_start);
i = (vga16fb_defined.bits_per_pixel == 8) ? 256 : 16;
ret = fb_alloc_cmap(&vga16fb.cmap, i, 0);
if (ret) {
......
......@@ -15,7 +15,7 @@
#include <linux/aio_abi.h>
#include <linux/module.h>
//#define DEBUG 1
#define DEBUG 0
#include <linux/sched.h>
#include <linux/fs.h>
......
......@@ -23,6 +23,7 @@ Amrut Joshi
Shobhit Dayal
Sergey Vlasov
Richard Hughes
Yury Umanets
Test case and Bug Report contributors
-------------------------------------
......@@ -30,5 +31,7 @@ Thanks to those in the community who have submitted detailed bug reports
and debug of problems they have found: Jochen Dolze, David Blaine,
Rene Scharfe, Martin Josefsson, Alexander Wild, Anthony Liguori,
Lars Muller, Urban Widmark, Massimiliano Ferrero, Howard Owen,
Kieron Briggs and others.
Olaf Kirch, Kieron Briggs and others.
And thanks to the IBM LTC and Power test teams and SuSE testers for
finding multiple bugs during excellent stress test runs.
Version 1.17
------------
Update number of blocks in file so du command is happier (in Linux a fake
blocksize of 512 is required for calculating number of blocks in inode).
Fix prepare write of partial pages to read in data from server if possible.
Fix race on tcpStatus field between unmount and reconnection code, causing
cifsd process sometimes to hang around forever. Improve out of memory
checks in cifs_filldir
Version 1.16
------------
Fix incorrect file size in file handle based setattr on big endian hardware.
......
......@@ -142,30 +142,10 @@ cifs_debug_data_read(char *buf, char **beginBuffer, off_t offset,
sprintf(buf, " type: %d ",
tcon->fsDevInfo.DeviceType);
buf += length;
if(tcon->tidStatus == CifsNeedReconnect)
if(tcon->tidStatus == CifsNeedReconnect) {
buf += sprintf(buf, "\tDISCONNECTED ");
#ifdef CONFIG_CIFS_STATS
length = sprintf(buf,"\nSMBs: %d Oplock Breaks: %d",
atomic_read(&tcon->num_smbs_sent),
atomic_read(&tcon->num_oplock_brks));
buf += length;
length = sprintf(buf,"\nReads: %d Bytes %lld",
atomic_read(&tcon->num_reads),
(long long)(tcon->bytes_read));
buf += length;
length = sprintf(buf,"\nWrites: %d Bytes: %lld",
atomic_read(&tcon->num_writes),
(long long)(tcon->bytes_written));
buf += length;
length = sprintf(buf,
"\nOpens: %d Deletes: %d\nMkdirs: %d Rmdirs: %d",
atomic_read(&tcon->num_opens),
atomic_read(&tcon->num_deletes),
atomic_read(&tcon->num_mkdirs),
atomic_read(&tcon->num_rmdirs));
buf += length;
#endif
length += 14;
}
}
read_unlock(&GlobalSMBSeslock);
......@@ -200,32 +180,80 @@ cifs_total_xid_read(char *buf, char **beginBuffer, off_t offset,
return length;
}
#ifdef CONFIG_CIFS_STATS
int
cifs_stats_read(char *buf, char **beginBuffer, off_t offset,
int length, int *eof, void *data)
{
int item_length;
length =
sprintf(buf,
"Currently Allocated structures\nCIFS Sessions: %d\n",sesInfoAllocCount.counter);
int item_length,i;
struct list_head *tmp;
struct cifsTconInfo *tcon;
length = sprintf(buf,
"Currently Allocated structures\nCIFS Sessions: %d\n",
sesInfoAllocCount.counter);
buf += length;
item_length =
sprintf(buf,"Shares (unique mount targets): %d\n",tconInfoAllocCount.counter);
sprintf(buf,"Shares (unique mount targets): %d\n",
tconInfoAllocCount.counter);
length += item_length;
buf += item_length;
item_length =
sprintf(buf,"Allocated SMB Request and Response Buffers: %d\n",bufAllocCount.counter);
sprintf(buf,"Allocated SMB Request/Response Buffers: %d\n",
bufAllocCount.counter);
length += item_length;
buf += item_length;
item_length =
sprintf(buf,"Active Operations (MIDs in use): %d\n",midCount.counter);
sprintf(buf,"Active Operations (MIDs in use): %d\n",
midCount.counter);
length += item_length;
buf += item_length;
item_length = sprintf(buf,"%d sessions and %d shares reconnected after failure\n",tcpSesReconnectCount.counter,tconInfoReconnectCount.counter);
item_length = sprintf(buf,
"%d sessions and %d shares reconnected after failure\n",
tcpSesReconnectCount.counter,tconInfoReconnectCount.counter);
length += item_length;
buf += item_length;
i = 0;
read_lock(&GlobalSMBSeslock);
list_for_each(tmp, &GlobalTreeConnectionList) {
i++;
tcon = list_entry(tmp, struct cifsTconInfo, cifsConnectionList);
item_length = sprintf(buf,"\n%d) %s",i, tcon->treeName);
buf += item_length;
length += item_length;
if(tcon->tidStatus == CifsNeedReconnect) {
buf += sprintf(buf, "\tDISCONNECTED ");
length += 14;
}
item_length = sprintf(buf,"\nSMBs: %d Oplock Breaks: %d",
atomic_read(&tcon->num_smbs_sent),
atomic_read(&tcon->num_oplock_brks));
buf += item_length;
length += item_length;
item_length = sprintf(buf,"\nReads: %d Bytes %lld",
atomic_read(&tcon->num_reads),
(long long)(tcon->bytes_read));
buf += item_length;
item_length = sprintf(buf,"\nWrites: %d Bytes: %lld",
atomic_read(&tcon->num_writes),
(long long)(tcon->bytes_written));
buf += item_length;
item_length = sprintf(buf,
"\nOpens: %d Deletes: %d\nMkdirs: %d Rmdirs: %d",
atomic_read(&tcon->num_opens),
atomic_read(&tcon->num_deletes),
atomic_read(&tcon->num_mkdirs),
atomic_read(&tcon->num_rmdirs));
buf += item_length;
length += item_length;
}
read_unlock(&GlobalSMBSeslock);
return length;
}
#endif
struct proc_dir_entry *proc_fs_cifs;
read_proc_t cifs_txanchor_read;
......@@ -265,10 +293,10 @@ cifs_proc_init(void)
create_proc_read_entry("SimultaneousOps", 0, proc_fs_cifs,
cifs_total_xid_read, 0);
#ifdef CONFIG_CIFS_STATS
create_proc_read_entry("Stats", 0, proc_fs_cifs,
cifs_stats_read, 0);
#endif
pde = create_proc_read_entry("cifsFYI", 0, proc_fs_cifs,
cifsFYI_read, 0);
if (pde)
......@@ -336,7 +364,9 @@ cifs_proc_clean(void)
remove_proc_entry("cifsFYI", proc_fs_cifs);
remove_proc_entry("traceSMB", proc_fs_cifs);
remove_proc_entry("SimultaneousOps", proc_fs_cifs);
#ifdef CONFIG_CIFS_STATS
remove_proc_entry("Stats", proc_fs_cifs);
#endif
remove_proc_entry("MultiuserMount", proc_fs_cifs);
remove_proc_entry("OplockEnabled", proc_fs_cifs);
remove_proc_entry("NTLMV2Enabled",proc_fs_cifs);
......
......@@ -93,5 +93,5 @@ extern int cifs_setxattr(struct dentry *, const char *, const void *,
size_t, int);
extern ssize_t cifs_getxattr(struct dentry *, const char *, void *, size_t);
extern ssize_t cifs_listxattr(struct dentry *, char *, size_t);
#define CIFS_VERSION "1.16"
#define CIFS_VERSION "1.17"
#endif /* _CIFSFS_H */
......@@ -862,6 +862,10 @@ typedef struct smb_com_create_directory_rsp {
__u16 ByteCount; /* bct = 0 */
} CREATE_DIRECTORY_RSP;
/***************************************************/
/* NT Transact structure definitions follow */
/* Currently only ioctl and notify are implemented */
/***************************************************/
typedef struct smb_com_transaction_ioctl_req {
struct smb_hdr hdr; /* wct = 23 */
__u8 MaxSetupCount;
......@@ -904,29 +908,45 @@ typedef struct smb_com_transaction_ioctl_rsp {
} TRANSACT_IOCTL_RSP;
typedef struct smb_com_transaction_change_notify_req {
struct smb_hdr hdr; /* wct = 23 */
__u8 MaxSetupCount;
__u16 Reserved;
__u32 TotalParameterCount;
__u32 TotalDataCount;
__u32 MaxParameterCount;
__u32 MaxDataCount;
__u32 ParameterCount;
__u32 ParameterOffset;
__u32 DataCount;
__u32 DataOffset;
__u8 SetupCount; /* four setup words follow subcommand */
/* SNIA spec incorrectly included spurious pad here */
__u16 SubCommand;/* 4 = Change Notify */
struct smb_hdr hdr; /* wct = 23 */
__u8 MaxSetupCount;
__u16 Reserved;
__u32 TotalParameterCount;
__u32 TotalDataCount;
__u32 MaxParameterCount;
__u32 MaxDataCount;
__u32 ParameterCount;
__u32 ParameterOffset;
__u32 DataCount;
__u32 DataOffset;
__u8 SetupCount; /* four setup words follow subcommand */
/* SNIA spec incorrectly included spurious pad here */
__u16 SubCommand;/* 4 = Change Notify */
__u32 CompletionFilter; /* operation to monitor */
__u16 Fid;
__u8 WatchTree; /* 1 = Monitor subdirectories */
__u8 Reserved2;
__u16 ByteCount;
__u8 Pad[3];
__u8 Data[1];
/* __u8 Pad[3];*/
/* __u8 Data[1];*/
} TRANSACT_CHANGE_NOTIFY_REQ;
/* Completion Filter flags */
typedef struct smb_com_transaction_change_notify_rsp {
struct smb_hdr hdr; /* wct = 18 */
__u8 Reserved[3];
__u32 TotalParameterCount;
__u32 TotalDataCount;
__u32 ParameterCount;
__u32 ParameterOffset;
__u32 ParameterDisplacement;
__u32 DataCount;
__u32 DataOffset;
__u32 DataDisplacement;
__u8 SetupCount; /* 0 */
__u16 ByteCount;
/* __u8 Pad[3]; */
} TRANSACT_CHANGE_NOTIFY_RSP;
/* Completion Filter flags for Notify */
#define FILE_NOTIFY_CHANGE_FILE_NAME 0x00000001
#define FILE_NOTIFY_CHANGE_DIR_NAME 0x00000002
#define FILE_NOTIFY_CHANGE_NAME 0x00000003
......
......@@ -244,4 +244,7 @@ extern int CIFSSMBCopy(int xid,
const __u16 target_tid,
const char *toName, const int flags,
const struct nls_table *nls_codepage);
extern int CIFSSMBNotify(const int xid, struct cifsTconInfo *tcon,
const int notify_subdirs,const __u16 netfid,__u32 filter,
const struct nls_table *nls_codepage);
#endif /* _CIFSPROTO_H */
......@@ -383,8 +383,11 @@ CIFSSMBLogoff(const int xid, struct cifsSesInfo *ses)
smb_buffer_response, &length, 0);
if (ses->server) {
atomic_dec(&ses->server->socketUseCount);
if (atomic_read(&ses->server->socketUseCount) == 0)
if (atomic_read(&ses->server->socketUseCount) == 0) {
spin_lock(&GlobalMid_Lock);
ses->server->tcpStatus = CifsExiting;
spin_unlock(&GlobalMid_Lock);
}
}
if (pSMB)
cifs_buf_release(pSMB);
......@@ -1464,9 +1467,9 @@ CIFSSMBQueryReparseLinkInfo(const int xid, struct cifsTconInfo *tcon,
pSMB->TotalParameterCount = 0 ;
pSMB->TotalDataCount = 0;
pSMB->MaxParameterCount = cpu_to_le16(2);
pSMB->MaxParameterCount = cpu_to_le32(2);
/* BB find exact data count max from sess structure BB */
pSMB->MaxDataCount = cpu_to_le16(4000);
pSMB->MaxDataCount = cpu_to_le32(4000);
pSMB->MaxSetupCount = 4;
pSMB->Reserved = 0;
pSMB->ParameterOffset = 0;
......@@ -2828,3 +2831,51 @@ CIFSSMBUnixSetPerms(const int xid, struct cifsTconInfo *tcon,
goto setPermsRetry;
return rc;
}
int CIFSSMBNotify(const int xid, struct cifsTconInfo *tcon,
const int notify_subdirs, const __u16 netfid,
__u32 filter, const struct nls_table *nls_codepage)
{
int rc = 0;
struct smb_com_transaction_change_notify_req * pSMB = NULL;
struct smb_com_transaction_change_notify_rsp * pSMBr = NULL;
int bytes_returned;
cFYI(1, ("In CIFSSMBNotify for file handle %d",(int)netfid));
rc = smb_init(SMB_COM_NT_TRANSACT, 23, tcon, (void **) &pSMB,
(void **) &pSMBr);
if (rc)
return rc;
pSMB->TotalParameterCount = 0 ;
pSMB->TotalDataCount = 0;
pSMB->MaxParameterCount = cpu_to_le32(2);
/* BB find exact data count max from sess structure BB */
pSMB->MaxDataCount = 0; /* same in little endian or be */
pSMB->MaxSetupCount = 4;
pSMB->Reserved = 0;
pSMB->ParameterOffset = 0;
pSMB->DataCount = 0;
pSMB->DataOffset = 0;
pSMB->SetupCount = 4; /* single byte does not need le conversion */
pSMB->SubCommand = cpu_to_le16(NT_TRANSACT_NOTIFY_CHANGE);
pSMB->ParameterCount = pSMB->TotalParameterCount;
if(notify_subdirs)
pSMB->WatchTree = 1; /* one byte - no le conversion needed */
pSMB->Reserved2 = 0;
pSMB->CompletionFilter = cpu_to_le32(filter);
pSMB->Fid = netfid; /* file handle always le */
pSMB->ByteCount = 0;
pSMB->hdr.smb_buf_length += pSMB->ByteCount;
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc) {
cFYI(1, ("Error in Notify = %d", rc));
}
if (pSMB)
cifs_buf_release(pSMB);
/* if (rc == -EAGAIN)
goto NotifyRetry; */
return rc;
}
......@@ -95,9 +95,15 @@ cifs_reconnect(struct TCP_Server_Info *server)
struct cifsTconInfo *tcon;
struct mid_q_entry * mid_entry;
if(server->tcpStatus == CifsExiting)
spin_lock(&GlobalMid_Lock);
if(server->tcpStatus == CifsExiting) {
/* the demux thread will exit normally
next time through the loop */
spin_unlock(&GlobalMid_Lock);
return rc;
server->tcpStatus = CifsNeedReconnect;
} else
server->tcpStatus = CifsNeedReconnect;
spin_unlock(&GlobalMid_Lock);
server->maxBuf = 0;
cFYI(1, ("Reconnecting tcp session "));
......@@ -164,7 +170,10 @@ cifs_reconnect(struct TCP_Server_Info *server)
schedule_timeout(3 * HZ);
} else {
atomic_inc(&tcpSesReconnectCount);
server->tcpStatus = CifsGood;
spin_lock(&GlobalMid_Lock);
if(server->tcpStatus != CifsExiting)
server->tcpStatus = CifsGood;
spin_unlock(&GlobalMid_Lock);
atomic_set(&server->inFlight,0);
wake_up(&server->response_q);
}
......@@ -243,12 +252,14 @@ cifs_demultiplex_thread(struct TCP_Server_Info *server)
/* some servers kill tcp session rather than returning
smb negprot error in which case reconnecting here is
not going to help - return error to mount */
spin_lock(&GlobalMid_Lock);
server->tcpStatus = CifsExiting;
spin_unlock(&GlobalMid_Lock);
wake_up(&server->response_q);
break;
}
cFYI(1,("Reconnecting after unexpected rcvmsg error "));
cFYI(1,("Reconnecting after unexpected peek error %d",length));
cifs_reconnect(server);
csocket = server->ssocket;
wake_up(&server->response_q);
......@@ -280,7 +291,9 @@ cifs_demultiplex_thread(struct TCP_Server_Info *server)
/* if nack on negprot (rather than
ret of smb negprot error) reconnecting
not going to help, ret error to mount */
spin_lock(&GlobalMid_Lock);
server->tcpStatus = CifsExiting;
spin_unlock(&GlobalMid_Lock);
/* wake up thread doing negprot */
wake_up(&server->response_q);
break;
......@@ -391,7 +404,9 @@ cifs_demultiplex_thread(struct TCP_Server_Info *server)
}
}
}
spin_lock(&GlobalMid_Lock);
server->tcpStatus = CifsExiting;
spin_unlock(&GlobalMid_Lock);
atomic_set(&server->inFlight, 0);
/* Although there should not be any requests blocked on
this queue it can not hurt to be paranoid and try to wake up requests
......@@ -595,6 +610,8 @@ cifs_parse_mount_options(char *options, const char *devname, struct smb_vol *vol
}
if ((temp_len = strnlen(value, 300)) < 300) {
vol->UNC = kmalloc(temp_len+1,GFP_KERNEL);
if(vol->UNC == NULL)
return 1;
strcpy(vol->UNC,value);
if (strncmp(vol->UNC, "//", 2) == 0) {
vol->UNC[0] = '\\';
......@@ -742,6 +759,8 @@ cifs_parse_mount_options(char *options, const char *devname, struct smb_vol *vol
}
if ((temp_len = strnlen(devname, 300)) < 300) {
vol->UNC = kmalloc(temp_len+1,GFP_KERNEL);
if(vol->UNC == NULL)
return 1;
strcpy(vol->UNC,devname);
if (strncmp(vol->UNC, "//", 2) == 0) {
vol->UNC[0] = '\\';
......@@ -1030,7 +1049,7 @@ ipv6_connect(struct sockaddr_in6 *psin_server, struct socket **csocket)
} else {
/* BB other socket options to set KEEPALIVE, NODELAY? */
cFYI(1,("ipv6 Socket created"));
(*csocket)->sk->sk_allocation = GFP_NOFS;
(*csocket)->sk->sk_allocation = GFP_NOFS;
}
}
......@@ -1226,6 +1245,9 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
init_waitqueue_head(&srvTcp->response_q);
init_waitqueue_head(&srvTcp->request_q);
INIT_LIST_HEAD(&srvTcp->pending_mid_q);
/* at this point we are the only ones with the pointer
to the struct since the kernel thread not created yet
so no need to spinlock this init of tcpStatus */
srvTcp->tcpStatus = CifsNew;
init_MUTEX(&srvTcp->tcpSem);
kernel_thread((void *)(void *)cifs_demultiplex_thread, srvTcp,
......@@ -1342,9 +1364,12 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
/* on error free sesinfo and tcon struct if needed */
if (rc) {
if(atomic_read(&srvTcp->socketUseCount) == 0)
srvTcp->tcpStatus = CifsExiting;
/* If find_unc succeeded then rc == 0 so we can not end */
if(atomic_read(&srvTcp->socketUseCount) == 0) {
spin_lock(&GlobalMid_Lock);
srvTcp->tcpStatus = CifsExiting;
spin_unlock(&GlobalMid_Lock);
}
/* If find_unc succeeded then rc == 0 so we can not end */
if (tcon) /* up here accidently freeing someone elses tcon struct */
tconInfoFree(tcon);
if (existingCifsSes == 0) {
......@@ -2791,7 +2816,7 @@ int cifs_setup_session(unsigned int xid, struct cifsSesInfo *pSesInfo,
char ntlm_session_key[CIFS_SESSION_KEY_SIZE];
int ntlmv2_flag = FALSE;
/* what if server changes its buffer size after dropping the session? */
/* what if server changes its buffer size after dropping the session? */
if(pSesInfo->server->maxBuf == 0) /* no need to send on reconnect */ {
rc = CIFSSMBNegotiate(xid, pSesInfo);
if(rc == -EAGAIN) /* retry only once on 1st time connection */ {
......@@ -2799,8 +2824,15 @@ int cifs_setup_session(unsigned int xid, struct cifsSesInfo *pSesInfo,
if(rc == -EAGAIN)
rc = -EHOSTDOWN;
}
if(rc == 0)
pSesInfo->server->tcpStatus = CifsGood;
if(rc == 0) {
spin_lock(&GlobalMid_Lock);
if(pSesInfo->server->tcpStatus != CifsExiting)
pSesInfo->server->tcpStatus = CifsGood;
else
rc = -EHOSTDOWN;
spin_unlock(&GlobalMid_Lock);
}
}
if (!rc) {
pSesInfo->capabilities = pSesInfo->server->capabilities;
......
......@@ -159,6 +159,7 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode,
struct cifsFileInfo * pCifsFile = NULL;
struct cifsInodeInfo * pCifsInode;
int disposition = FILE_OVERWRITE_IF;
int write_only = FALSE;
xid = GetXid();
......@@ -176,9 +177,10 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode,
if(nd) {
if ((nd->intent.open.flags & O_ACCMODE) == O_RDONLY)
desiredAccess = GENERIC_READ;
else if ((nd->intent.open.flags & O_ACCMODE) == O_WRONLY)
else if ((nd->intent.open.flags & O_ACCMODE) == O_WRONLY) {
desiredAccess = GENERIC_WRITE;
else if ((nd->intent.open.flags & O_ACCMODE) == O_RDWR) {
write_only = TRUE;
} else if ((nd->intent.open.flags & O_ACCMODE) == O_RDWR) {
/* GENERIC_ALL is too much permission to request */
/* can cause unnecessary access denied on create */
/* desiredAccess = GENERIC_ALL; */
......@@ -262,16 +264,25 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode,
pCifsFile->invalidHandle = FALSE;
pCifsFile->closePend = FALSE;
init_MUTEX(&pCifsFile->fh_sem);
/* pCifsFile->pfile = file; */ /* put in at open time */
/* put the following in at open now */
/* pCifsFile->pfile = file; */
write_lock(&GlobalSMBSeslock);
list_add(&pCifsFile->tlist,&pTcon->openFileList);
pCifsInode = CIFS_I(newinode);
if(pCifsInode) {
list_add(&pCifsFile->flist,&pCifsInode->openFileList);
/* if readable file instance put first in list*/
if (write_only == TRUE) {
list_add_tail(&pCifsFile->flist,
&pCifsInode->openFileList);
} else {
list_add(&pCifsFile->flist,
&pCifsInode->openFileList);
}
if((oplock & 0xF) == OPLOCK_EXCLUSIVE) {
pCifsInode->clientCanCacheAll = TRUE;
pCifsInode->clientCanCacheRead = TRUE;
cFYI(1,("Exclusive Oplock granted on inode %p",newinode));
cFYI(1,("Exclusive Oplock granted on inode %p",
newinode));
} else if((oplock & 0xF) == OPLOCK_READ)
pCifsInode->clientCanCacheRead = TRUE;
}
......
......@@ -32,9 +32,12 @@ int cifs_directory_notify(unsigned long arg, struct file * file)
{
int xid;
int rc = -EINVAL;
int oplock = FALSE;
struct cifs_sb_info *cifs_sb;
struct cifsTconInfo *pTcon;
char *full_path = NULL;
__u32 filter = FILE_NOTIFY_CHANGE_NAME | FILE_NOTIFY_CHANGE_ATTRIBUTES;
__u16 netfid;
xid = GetXid();
cifs_sb = CIFS_SB(file->f_dentry->d_sb);
......@@ -48,7 +51,20 @@ int cifs_directory_notify(unsigned long arg, struct file * file)
rc = -ENOMEM;
} else {
cFYI(1,("cifs dir notify on file %s",full_path));
/* CIFSSMBNotify(xid, pTcon, full_path, cifs_sb->local_nls);*/
rc = CIFSSMBOpen(xid, pTcon, full_path, FILE_OPEN,
GENERIC_READ | SYNCHRONIZE, 0 /* create options */,
&netfid, &oplock,NULL, cifs_sb->local_nls);
/* BB fixme - add this handle to a notify handle list */
if(rc) {
cFYI(1,("Could not open directory for notify"));
} else {
rc = CIFSSMBNotify(xid, pTcon, 1 /* subdirs */, netfid,
filter, cifs_sb->local_nls);
/* BB add code to close file eventually (at unmount
it would close automatically but may be a way
to do it easily when inode freed or when
notify info is cleared/changed */
}
}
FreeXid(xid);
......
......@@ -130,8 +130,18 @@ cifs_get_inode_info_unix(struct inode **pinode,
and blkbits set in superblock so 2**blkbits and blksize will match */
/* inode->i_blksize =
(pTcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE) & 0xFFFFFE00;*/
inode->i_blocks =
(inode->i_blksize - 1 + findData.NumOfBytes) >> inode->i_blkbits;
/* This seems incredibly stupid but it turns out that
i_blocks is not related to (i_size / i_blksize), instead a
size of 512 is required to be used for calculating num blocks */
/* inode->i_blocks =
(inode->i_blksize - 1 + findData.NumOfBytes) >> inode->i_blkbits;*/
/* 512 bytes (2**9) is the fake blocksize that must be used */
/* for this calculation */
inode->i_blocks = (512 - 1 + findData.NumOfBytes) >> 9;
if (findData.NumOfBytes < findData.EndOfFile)
cFYI(1, ("Server inconsistency Error: it says allocation size less than end of file "));
......@@ -275,8 +285,10 @@ cifs_get_inode_info(struct inode **pinode, const unsigned char *search_path,
}
i_size_write(inode,le64_to_cpu(pfindData->EndOfFile));
pfindData->AllocationSize = le64_to_cpu(pfindData->AllocationSize);
inode->i_blocks =
(inode->i_blksize - 1 + pfindData->AllocationSize) >> inode->i_blkbits;
/* 512 bytes (2**9) is the fake blocksize that must be used */
/* for this calculation */
inode->i_blocks = (512 - 1 + pfindData->AllocationSize) >> 9;
inode->i_nlink = le32_to_cpu(pfindData->NumberOfLinks);
......
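The fake 512-byte block size works because i_blocks is defined in 512-byte units regardless of the filesystem block size, so du (which counts those units) comes out right. As a worked example of the expression above: an allocation of 6000 bytes gives (512 - 1 + 6000) >> 9 = 12, i.e. twelve 512-byte blocks, which du reports as 6 KB. A one-line check of the rounding:

#include <stdio.h>

int main(void)
{
	unsigned long long bytes = 6000;                     /* example allocation size */
	unsigned long long blocks = (512 - 1 + bytes) >> 9;  /* same formula as above */

	printf("%llu bytes -> %llu 512-byte blocks\n", bytes, blocks);
	return 0;
}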
......@@ -126,7 +126,6 @@ smb_send(struct socket *ssocket, struct smb_hdr *smb_buffer,
if(ssocket == NULL)
return -ENOTSOCK; /* BB eventually add reconnect code here */
/* ssocket->sk->allocation = GFP_BUFFER; *//* BB is this spurious? */
iov.iov_base = smb_buffer;
iov.iov_len = smb_buf_length + 4;
......
......@@ -1875,13 +1875,17 @@ static void ext3_commit_super (struct super_block * sb,
static void ext3_mark_recovery_complete(struct super_block * sb,
struct ext3_super_block * es)
{
journal_flush(EXT3_SB(sb)->s_journal);
journal_t *journal = EXT3_SB(sb)->s_journal;
journal_lock_updates(journal);
journal_flush(journal);
if (EXT3_HAS_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER) &&
sb->s_flags & MS_RDONLY) {
EXT3_CLEAR_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER);
sb->s_dirt = 0;
ext3_commit_super(sb, es, 1);
}
journal_unlock_updates(journal);
}
/*
......
......@@ -396,9 +396,19 @@ writeback_inodes(struct writeback_control *wbc)
sb = sb_entry(super_blocks.prev);
for (; sb != sb_entry(&super_blocks); sb = sb_entry(sb->s_list.prev)) {
if (!list_empty(&sb->s_dirty) || !list_empty(&sb->s_io)) {
/* we're making our own get_super here */
sb->s_count++;
spin_unlock(&sb_lock);
sync_sb_inodes(sb, wbc);
/*
* If we can't get the readlock, there's no sense in
* waiting around, most of the time the FS is going to
* be unmounted by the time it is released.
*/
if (down_read_trylock(&sb->s_umount)) {
if (sb->s_root)
sync_sb_inodes(sb, wbc);
up_read(&sb->s_umount);
}
spin_lock(&sb_lock);
if (__put_super(sb))
goto restart;
......
......@@ -412,7 +412,8 @@ void journal_commit_transaction(journal_t *journal)
tagp = &bh->b_data[sizeof(journal_header_t)];
space_left = bh->b_size - sizeof(journal_header_t);
first_tag = 1;
set_bit(BH_JWrite, &bh->b_state);
set_buffer_jwrite(bh);
set_buffer_dirty(bh);
wbuf[bufs++] = bh;
/* Record it so that we can wait for IO
......@@ -638,7 +639,8 @@ void journal_commit_transaction(journal_t *journal)
JBUFFER_TRACE(descriptor, "write commit block");
{
struct buffer_head *bh = jh2bh(descriptor);
set_buffer_uptodate(bh);
set_buffer_dirty(bh);
sync_dirty_buffer(bh);
if (unlikely(!buffer_uptodate(bh)))
err = -EIO;
......
......@@ -585,9 +585,13 @@ int journal_bmap(journal_t *journal, unsigned long blocknr,
* We play buffer_head aliasing tricks to write data/metadata blocks to
* the journal without copying their contents, but for journal
* descriptor blocks we do need to generate bona fide buffers.
*
* After the caller of journal_get_descriptor_buffer() has finished modifying
* the buffer's contents they really should run flush_dcache_page(bh->b_page).
* But we don't bother doing that, so there will be coherency problems with
* mmaps of blockdevs which hold live JBD-controlled filesystems.
*/
struct journal_head * journal_get_descriptor_buffer(journal_t *journal)
struct journal_head *journal_get_descriptor_buffer(journal_t *journal)
{
struct buffer_head *bh;
unsigned long blocknr;
......@@ -599,8 +603,10 @@ struct journal_head * journal_get_descriptor_buffer(journal_t *journal)
return NULL;
bh = __getblk(journal->j_dev, blocknr, journal->j_blocksize);
lock_buffer(bh);
memset(bh->b_data, 0, journal->j_blocksize);
bh->b_state |= (1 << BH_Dirty);
set_buffer_uptodate(bh);
unlock_buffer(bh);
BUFFER_TRACE(bh, "return this buffer");
return journal_add_journal_head(bh);
}
......
......@@ -522,7 +522,7 @@ void journal_write_revoke_records(journal_t *journal,
kmem_cache_free(revoke_record_cache, record);
}
}
if (descriptor)
if (descriptor)
flush_descriptor(journal, descriptor, offset);
jbd_debug(1, "Wrote %d revoke records\n", count);
}
......@@ -606,7 +606,7 @@ static void flush_descriptor(journal_t *journal,
header->r_count = htonl(offset);
set_buffer_jwrite(bh);
BUFFER_TRACE(bh, "write");
set_buffer_uptodate(bh);
set_buffer_dirty(bh);
ll_rw_block(WRITE, 1, &bh);
}
#endif
......
......@@ -466,6 +466,7 @@ type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5, type6 arg6
#define __ARCH_WANT_SYS_OLDUMOUNT
#define __ARCH_WANT_SYS_SIGPENDING
#define __ARCH_WANT_SYS_SIGPROCMASK
#define __ARCH_WANT_SYS_RT_SIGACTION
#endif
#ifdef __KERNEL_SYSCALLS__
......
......@@ -397,6 +397,7 @@ type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5) { \
#define __ARCH_WANT_SYS_OLDUMOUNT
#define __ARCH_WANT_SYS_SIGPENDING
#define __ARCH_WANT_SYS_SIGPROCMASK
#define __ARCH_WANT_SYS_RT_SIGACTION
#endif
#ifdef __KERNEL_SYSCALLS__
......
......@@ -314,6 +314,7 @@
#define __ARCH_WANT_SYS_OLDUMOUNT
#define __ARCH_WANT_SYS_SIGPENDING
#define __ARCH_WANT_SYS_SIGPROCMASK
#define __ARCH_WANT_SYS_RT_SIGACTION
#endif
#ifdef __KERNEL_SYSCALLS__
......
......@@ -468,6 +468,7 @@ type name(atype a, btype b, ctype c, dtype d, etype e, ftype f) \
#define __ARCH_WANT_SYS_OLDUMOUNT
#define __ARCH_WANT_SYS_SIGPENDING
#define __ARCH_WANT_SYS_SIGPROCMASK
#define __ARCH_WANT_SYS_RT_SIGACTION
#endif
#ifdef __KERNEL_SYSCALLS__
......
......@@ -403,6 +403,7 @@ __syscall_return(type,__res); \
#define __ARCH_WANT_SYS_OLDUMOUNT
#define __ARCH_WANT_SYS_SIGPENDING
#define __ARCH_WANT_SYS_SIGPROCMASK
#define __ARCH_WANT_SYS_RT_SIGACTION
#endif
#ifdef __KERNEL_SYSCALLS__
......
......@@ -275,6 +275,7 @@
# define __ARCH_WANT_SYS_OLDUMOUNT
# define __ARCH_WANT_SYS_SIGPENDING
# define __ARCH_WANT_SYS_SIGPROCMASK
# define __ARCH_WANT_SYS_RT_SIGACTION
#endif
#if !defined(__ASSEMBLY__) && !defined(ASSEMBLER)
......
......@@ -359,6 +359,7 @@ __syscall_return(type,__res); \
#define __ARCH_WANT_SYS_OLDUMOUNT
#define __ARCH_WANT_SYS_SIGPENDING
#define __ARCH_WANT_SYS_SIGPROCMASK
#define __ARCH_WANT_SYS_RT_SIGACTION
#endif
#ifdef __KERNEL_SYSCALLS__
......
......@@ -394,6 +394,7 @@ type name(atype a, btype b, ctype c, dtype d, etype e) \
#define __ARCH_WANT_SYS_OLDUMOUNT
#define __ARCH_WANT_SYS_SIGPENDING
#define __ARCH_WANT_SYS_SIGPROCMASK
#define __ARCH_WANT_SYS_RT_SIGACTION
#endif
#ifdef __KERNEL_SYSCALLS__
......
......@@ -1091,6 +1091,7 @@ type name (atype a,btype b,ctype c,dtype d,etype e,ftype f) \
#define __ARCH_WANT_SYS_OLDUMOUNT
#define __ARCH_WANT_SYS_SIGPENDING
#define __ARCH_WANT_SYS_SIGPROCMASK
#define __ARCH_WANT_SYS_RT_SIGACTION
# ifndef __mips64
# define __ARCH_WANT_STAT64
# endif
......
......@@ -899,6 +899,7 @@ type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5) \
#define __ARCH_WANT_SYS_OLDUMOUNT
#define __ARCH_WANT_SYS_SIGPENDING
#define __ARCH_WANT_SYS_SIGPROCMASK
#define __ARCH_WANT_SYS_RT_SIGACTION
#endif
/* mmap & mmap2 take 6 arguments */
......
......@@ -402,6 +402,7 @@ type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5) \
#define __ARCH_WANT_SYS_OLDUMOUNT
#define __ARCH_WANT_SYS_SIGPENDING
#define __ARCH_WANT_SYS_SIGPROCMASK
#define __ARCH_WANT_SYS_RT_SIGACTION
/*
* Forking from kernel space will result in the child getting a new,
......
......@@ -439,6 +439,7 @@ static inline _syscall3(int, execve, __const__ char *, file, char **, argv,
#define __ARCH_WANT_SYS_OLDUMOUNT
#define __ARCH_WANT_SYS_SIGPENDING
#define __ARCH_WANT_SYS_SIGPROCMASK
#define __ARCH_WANT_SYS_RT_SIGACTION
unsigned long sys_mmap(unsigned long addr, size_t len, unsigned long prot,
unsigned long flags, unsigned long fd, off_t offset);
......
......@@ -43,8 +43,6 @@ struct iommu_table;
int vio_register_driver(struct vio_driver *drv);
void vio_unregister_driver(struct vio_driver *drv);
const struct vio_device_id * vio_match_device(const struct vio_device_id *ids,
const struct vio_dev *dev);
struct vio_dev * __devinit vio_register_device(struct device_node *node_vdev);
void __devinit vio_unregister_device(struct vio_dev *dev);
......@@ -52,7 +50,6 @@ struct vio_dev *vio_find_node(struct device_node *vnode);
const void * vio_get_attribute(struct vio_dev *vdev, void* which, int* length);
int vio_get_irq(struct vio_dev *dev);
struct iommu_table * vio_build_iommu_table(struct vio_dev *dev);
int vio_enable_interrupts(struct vio_dev *dev);
int vio_disable_interrupts(struct vio_dev *dev);
......@@ -110,8 +107,6 @@ static inline struct vio_driver *to_vio_driver(struct device_driver *drv)
* The vio_dev structure is used to describe virtual I/O devices.
*/
struct vio_dev {
struct device_node *archdata; /* Open Firmware node */
void *driver_data; /* data private to the driver */
struct iommu_table *iommu_table; /* vio_map_* uses this */
uint32_t unit_address;
unsigned int irq;
......
......@@ -303,6 +303,7 @@ typedef struct
*/
struct pt_regs
{
unsigned long args[1];
psw_t psw;
unsigned long gprs[NUM_GPRS];
unsigned long orig_gpr2;
......
......@@ -530,6 +530,7 @@ type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
#define __ARCH_WANT_SYS_OLDUMOUNT
#define __ARCH_WANT_SYS_SIGPENDING
#define __ARCH_WANT_SYS_SIGPROCMASK
#define __ARCH_WANT_SYS_RT_SIGACTION
# ifndef CONFIG_ARCH_S390X
# define __ARCH_WANT_STAT64
# endif
......
......@@ -422,6 +422,7 @@ __syscall_return(type,__sc0); \
#define __ARCH_WANT_SYS_OLDUMOUNT
#define __ARCH_WANT_SYS_SIGPENDING
#define __ARCH_WANT_SYS_SIGPROCMASK
#define __ARCH_WANT_SYS_RT_SIGACTION
#endif
#ifdef __KERNEL_SYSCALLS__
......
......@@ -34,6 +34,7 @@ extern int um_execve(const char *file, char *const argv[], char *const env[]);
#define __ARCH_WANT_SYS_OLDUMOUNT
#define __ARCH_WANT_SYS_SIGPENDING
#define __ARCH_WANT_SYS_SIGPROCMASK
#define __ARCH_WANT_SYS_RT_SIGACTION
#endif
#ifdef __KERNEL_SYSCALLS__
......
......@@ -408,6 +408,7 @@ type name (atype a, btype b, ctype c, dtype d, etype e, ftype f) \
#define __ARCH_WANT_SYS_OLDUMOUNT
#define __ARCH_WANT_SYS_SIGPENDING
#define __ARCH_WANT_SYS_SIGPROCMASK
#define __ARCH_WANT_SYS_RT_SIGACTION
#endif
#ifdef __KERNEL_SYSCALLS__
......
......@@ -591,6 +591,7 @@ do { \
#define __ARCH_WANT_SYS_OLDUMOUNT
#define __ARCH_WANT_SYS_SIGPENDING
#define __ARCH_WANT_SYS_SIGPROCMASK
#define __ARCH_WANT_SYS_RT_SIGACTION
#endif
#ifndef __KERNEL_SYSCALLS__
......
......@@ -133,7 +133,7 @@ struct page_state {
unsigned long allocstall; /* direct reclaim calls */
unsigned long pgrotated; /* pages rotated to tail of the LRU */
} ____cacheline_aligned;
};
DECLARE_PER_CPU(struct page_state, page_states);
......
......@@ -738,7 +738,7 @@ extern void FASTCALL(wake_up_forked_process(struct task_struct * tsk));
static inline void kick_process(struct task_struct *tsk) { }
static inline void wake_up_forked_thread(struct task_struct * tsk)
{
return wake_up_forked_process(tsk);
wake_up_forked_process(tsk);
}
#endif
extern void FASTCALL(sched_fork(task_t * p));
......
......@@ -40,7 +40,7 @@
#define UDFFS_DATE "2004/29/09"
#define UDFFS_VERSION "0.9.8.1"
#define UDFFS_DEBUG
#undef UDFFS_DEBUG
#ifdef UDFFS_DEBUG
#define udf_debug(f, a...) \
......
......@@ -271,7 +271,6 @@ static inline int dup_mmap(struct mm_struct * mm, struct mm_struct * oldmm)
struct vm_area_struct * mpnt, *tmp, **pprev;
struct rb_node **rb_link, *rb_parent;
int retval;
unsigned long charge = 0;
struct mempolicy *pol;
down_write(&oldmm->mmap_sem);
......@@ -308,7 +307,6 @@ static inline int dup_mmap(struct mm_struct * mm, struct mm_struct * oldmm)
unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
if (security_vm_enough_memory(len))
goto fail_nomem;
charge += len;
}
tmp = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
if (!tmp)
......@@ -360,7 +358,7 @@ static inline int dup_mmap(struct mm_struct * mm, struct mm_struct * oldmm)
tmp->vm_ops->open(tmp);
if (retval)
goto fail;
goto out;
}
retval = 0;
......@@ -372,8 +370,6 @@ static inline int dup_mmap(struct mm_struct * mm, struct mm_struct * oldmm)
kmem_cache_free(vm_area_cachep, tmp);
fail_nomem:
retval = -ENOMEM;
fail:
vm_unacct_memory(charge);
goto out;
}
static inline int mm_alloc_pgd(struct mm_struct * mm)
......
......@@ -2477,7 +2477,7 @@ sys_sigprocmask(int how, old_sigset_t __user *set, old_sigset_t __user *oset)
}
#endif /* __ARCH_WANT_SYS_SIGPROCMASK */
#ifndef __sparc__
#ifdef __ARCH_WANT_SYS_RT_SIGACTION
asmlinkage long
sys_rt_sigaction(int sig,
const struct sigaction __user *act,
......@@ -2505,7 +2505,7 @@ sys_rt_sigaction(int sig,
out:
return ret;
}
#endif /* __sparc__ */
#endif /* __ARCH_WANT_SYS_RT_SIGACTION */
#ifdef __ARCH_WANT_SYS_SGETMASK
......
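Together with the unistd.h hunks earlier in this diff, this replaces the hard-coded '#ifndef __sparc__' test with an opt-in macro: every architecture that wants the generic sys_rt_sigaction() defines __ARCH_WANT_SYS_RT_SIGACTION in its unistd.h, and sparc simply leaves it undefined. A miniature sketch of the opt-in pattern, with hypothetical names:

/* Hypothetical miniature of the opt-in pattern used above. */

/* arch header: an architecture that wants the generic implementation
 * defines the macro; one with its own version leaves it out. */
#define __ARCH_WANT_GENERIC_FOO

/* generic code: compiled only for architectures that opted in. */
#ifdef __ARCH_WANT_GENERIC_FOO
long sys_foo(void)
{
	return 0;
}
#endif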
......@@ -218,7 +218,7 @@ static ctl_table kern_table[] = {
.ctl_name = KERN_OSTYPE,
.procname = "ostype",
.data = system_utsname.sysname,
.maxlen = 64,
.maxlen = sizeof(system_utsname.sysname),
.mode = 0444,
.proc_handler = &proc_doutsstring,
.strategy = &sysctl_string,
......@@ -227,7 +227,7 @@ static ctl_table kern_table[] = {
.ctl_name = KERN_OSRELEASE,
.procname = "osrelease",
.data = system_utsname.release,
.maxlen = 64,
.maxlen = sizeof(system_utsname.release),
.mode = 0444,
.proc_handler = &proc_doutsstring,
.strategy = &sysctl_string,
......@@ -236,7 +236,7 @@ static ctl_table kern_table[] = {
.ctl_name = KERN_VERSION,
.procname = "version",
.data = system_utsname.version,
.maxlen = 64,
.maxlen = sizeof(system_utsname.version),
.mode = 0444,
.proc_handler = &proc_doutsstring,
.strategy = &sysctl_string,
......@@ -245,7 +245,7 @@ static ctl_table kern_table[] = {
.ctl_name = KERN_NODENAME,
.procname = "hostname",
.data = system_utsname.nodename,
.maxlen = 64,
.maxlen = sizeof(system_utsname.nodename),
.mode = 0644,
.proc_handler = &proc_doutsstring,
.strategy = &sysctl_string,
......@@ -254,7 +254,7 @@ static ctl_table kern_table[] = {
.ctl_name = KERN_DOMAINNAME,
.procname = "domainname",
.data = system_utsname.domainname,
.maxlen = 64,
.maxlen = sizeof(system_utsname.domainname),
.mode = 0644,
.proc_handler = &proc_doutsstring,
.strategy = &sysctl_string,
......@@ -1917,56 +1917,56 @@ int proc_dointvec_userhz_jiffies(ctl_table *table, int write, struct file *filp,
#else /* CONFIG_PROC_FS */
int proc_dostring(ctl_table *table, int write, struct file *filp,
void *buffer, size_t *lenp)
void __user *buffer, size_t *lenp)
{
return -ENOSYS;
}
static int proc_doutsstring(ctl_table *table, int write, struct file *filp,
void *buffer, size_t *lenp)
void __user *buffer, size_t *lenp)
{
return -ENOSYS;
}
int proc_dointvec(ctl_table *table, int write, struct file *filp,
void *buffer, size_t *lenp)
void __user *buffer, size_t *lenp)
{
return -ENOSYS;
}
int proc_dointvec_bset(ctl_table *table, int write, struct file *filp,
void *buffer, size_t *lenp)
void __user *buffer, size_t *lenp)
{
return -ENOSYS;
}
int proc_dointvec_minmax(ctl_table *table, int write, struct file *filp,
void *buffer, size_t *lenp)
void __user *buffer, size_t *lenp)
{
return -ENOSYS;
}
int proc_dointvec_jiffies(ctl_table *table, int write, struct file *filp,
void *buffer, size_t *lenp)
void __user *buffer, size_t *lenp)
{
return -ENOSYS;
}
int proc_dointvec_userhz_jiffies(ctl_table *table, int write, struct file *filp,
void *buffer, size_t *lenp)
void __user *buffer, size_t *lenp)
{
return -ENOSYS;
}
int proc_doulongvec_minmax(ctl_table *table, int write, struct file *filp,
void *buffer, size_t *lenp)
void __user *buffer, size_t *lenp)
{
return -ENOSYS;
}
int proc_doulongvec_ms_jiffies_minmax(ctl_table *table, int write,
struct file *filp,
void *buffer, size_t *lenp)
void __user *buffer, size_t *lenp)
{
return -ENOSYS;
}
......@@ -2141,13 +2141,13 @@ int proc_dointvec_minmax(ctl_table *table, int write, struct file *filp,
}
int proc_dointvec_jiffies(ctl_table *table, int write, struct file *filp,
void *buffer, size_t *lenp)
void __user *buffer, size_t *lenp)
{
return -ENOSYS;
}
int proc_dointvec_userhz_jiffies(ctl_table *table, int write, struct file *filp,
void *buffer, size_t *lenp)
void __user *buffer, size_t *lenp)
{
return -ENOSYS;
}
......
......@@ -218,6 +218,33 @@ static int worker_thread(void *__cwq)
return 0;
}
static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
{
if (cwq->thread == current) {
/*
* Probably keventd trying to flush its own queue. So simply run
* it by hand rather than deadlocking.
*/
run_workqueue(cwq);
} else {
DEFINE_WAIT(wait);
long sequence_needed;
spin_lock_irq(&cwq->lock);
sequence_needed = cwq->insert_sequence;
while (sequence_needed - cwq->remove_sequence > 0) {
prepare_to_wait(&cwq->work_done, &wait,
TASK_UNINTERRUPTIBLE);
spin_unlock_irq(&cwq->lock);
schedule();
spin_lock_irq(&cwq->lock);
}
finish_wait(&cwq->work_done, &wait);
spin_unlock_irq(&cwq->lock);
}
}
/*
* flush_workqueue - ensure that any scheduled work has run to completion.
*
......@@ -234,43 +261,19 @@ static int worker_thread(void *__cwq)
*/
void fastcall flush_workqueue(struct workqueue_struct *wq)
{
struct cpu_workqueue_struct *cwq;
int cpu;
might_sleep();
lock_cpu_hotplug();
for_each_online_cpu(cpu) {
DEFINE_WAIT(wait);
long sequence_needed;
if (is_single_threaded(wq))
cwq = wq->cpu_wq + 0; /* Always use cpu 0's area. */
else
cwq = wq->cpu_wq + cpu;
if (cwq->thread == current) {
/*
* Probably keventd trying to flush its own queue.
* So simply run it by hand rather than deadlocking.
*/
run_workqueue(cwq);
continue;
}
spin_lock_irq(&cwq->lock);
sequence_needed = cwq->insert_sequence;
if (is_single_threaded(wq)) {
/* Always use cpu 0's area. */
flush_cpu_workqueue(wq->cpu_wq + 0);
} else {
int cpu;
while (sequence_needed - cwq->remove_sequence > 0) {
prepare_to_wait(&cwq->work_done, &wait,
TASK_UNINTERRUPTIBLE);
spin_unlock_irq(&cwq->lock);
schedule();
spin_lock_irq(&cwq->lock);
}
finish_wait(&cwq->work_done, &wait);
spin_unlock_irq(&cwq->lock);
lock_cpu_hotplug();
for_each_online_cpu(cpu)
flush_cpu_workqueue(wq->cpu_wq + cpu);
unlock_cpu_hotplug();
}
unlock_cpu_hotplug();
}
static struct task_struct *create_workqueue_thread(struct workqueue_struct *wq,
......
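For callers nothing changes here: the refactor only pulls the per-CPU wait loop out into flush_cpu_workqueue() so the single-threaded and per-CPU cases share it, and keventd flushing its own queue still runs the work by hand rather than deadlocking. A minimal usage sketch, assuming the 2.6-era workqueue API (handlers take a void * argument and DECLARE_WORK takes a data pointer):

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/workqueue.h>

/* Hypothetical demo: queue a work item on keventd and wait for it. */
static void demo_handler(void *data)
{
	printk(KERN_INFO "demo work ran\n");
}

static DECLARE_WORK(demo_work, demo_handler, NULL);

static int __init demo_init(void)
{
	schedule_work(&demo_work);   /* hand the item to keventd */
	flush_scheduled_work();      /* returns once it has run to completion */
	return 0;
}

static void __exit demo_exit(void)
{
	flush_scheduled_work();      /* nothing of ours left pending */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");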