Commit 9ff086a3 authored by Linus Torvalds

v2.4.12.6 -> v2.4.13

  - page write-out throttling
  - Pete Zaitcev: ymfpci sound driver update (make Civ:CTP happy with it)
  - Alan Cox: i2o sync-up
  - Andrea Arcangeli: revert broken x86 smp_call_function patch
  - me: handle VM write load more gracefully. Merge parts of -aa VM
parent aed492fc
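The arch/i386/kernel/smp.c hunks below revert the per-CPU call_data_array scheme and return smp_call_function() to a single on-stack call_data_struct, published under call_lock with atomic started/finished counters. For orientation, a minimal caller of this interface might look like the following sketch (do_flush() and flush_all_cpus() are illustrative names, not part of this patch):

	/* Sketch only: run a function on every other CPU and wait for it. */
	static void do_flush(void *info)
	{
		__flush_tlb();		/* executes on each remote CPU */
	}

	static void flush_all_cpus(void)
	{
		/* nonatomic=0, wait=1: return only after all CPUs ran do_flush */
		if (smp_call_function(do_flush, NULL, 0, 1))
			printk(KERN_ERR "flush_all_cpus: IPI broadcast failed\n");
		__flush_tlb();		/* flush the local CPU too */
	}

Because a wait=1 caller spins until data.finished reaches the remote CPU count, the structure can safely live on the initiator's stack, which is exactly what the revert restores.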
@@ -1693,6 +1693,12 @@ M: jpr@f6fbb.org
 L:	linux-hams@vger.kernel.org
 S:	Maintained
 
+YMFPCI YAMAHA PCI SOUND
+P:	Pete Zaitcev
+M:	zaitcev@yahoo.com
+L:	linux-kernel@vger.kernel.org
+S:	Maintained
+
 Z85230 SYNCHRONOUS DRIVER
 P:	Alan Cox
 M:	alan@redhat.com
......
 VERSION = 2
 PATCHLEVEL = 4
 SUBLEVEL = 13
-EXTRAVERSION =-pre6
+EXTRAVERSION =
 
 KERNELRELEASE=$(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION)
......
@@ -507,10 +507,9 @@ struct call_data_struct {
 	atomic_t started;
 	atomic_t finished;
 	int wait;
-} __attribute__ ((__aligned__(SMP_CACHE_BYTES)));
+};
 
 static struct call_data_struct * call_data;
-static struct call_data_struct call_data_array[NR_CPUS];
 
 /*
  * this function sends a 'generic call function' IPI to all other CPUs
@@ -532,45 +531,33 @@ int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
  * hardware interrupt handler, you may call it from a bottom half handler.
  */
 {
-	struct call_data_struct *data;
-	int cpus = (cpu_online_map & ~(1 << smp_processor_id()));
+	struct call_data_struct data;
+	int cpus = smp_num_cpus-1;
 
 	if (!cpus)
 		return 0;
 
-	data = &call_data_array[smp_processor_id()];
-
-	data->func = func;
-	data->info = info;
-	data->wait = wait;
+	data.func = func;
+	data.info = info;
+	atomic_set(&data.started, 0);
+	data.wait = wait;
 	if (wait)
-		atomic_set(&data->finished, 0);
-	/* We have do to this one last to make sure that the IPI service
-	 * code desn't get confused if it gets an unexpected repeat
-	 * trigger of an old IPI while we're still setting up the new
-	 * one. */
-	atomic_set(&data->started, 0);
-
-	local_bh_disable();
-	spin_lock(&call_lock);
-	call_data = data;
+		atomic_set(&data.finished, 0);
+
+	spin_lock_bh(&call_lock);
+	call_data = &data;
+	wmb();
 
 	/* Send a message to all other CPUs and wait for them to respond */
 	send_IPI_allbutself(CALL_FUNCTION_VECTOR);
 
 	/* Wait for response */
-	while (atomic_read(&data->started) != cpus)
+	while (atomic_read(&data.started) != cpus)
 		barrier();
 
-	/* It is now safe to reuse the "call_data" global, but we need
-	 * to keep local bottom-halves disabled until after waiters have
-	 * been acknowledged to prevent reuse of the per-cpu call data
-	 * entry. */
-	spin_unlock(&call_lock);
 	if (wait)
-		while (atomic_read(&data->finished) != cpus)
+		while (atomic_read(&data.finished) != cpus)
 			barrier();
-	local_bh_enable();
+	spin_unlock_bh(&call_lock);
 
 	return 0;
 }
@@ -620,17 +607,18 @@ asmlinkage void smp_call_function_interrupt(void)
 	ack_APIC_irq();
 	/*
-	 * Notify initiating CPU that I've grabbed the data and am about
-	 * to execute the function (and avoid servicing any single IPI
-	 * twice)
+	 * Notify initiating CPU that I've grabbed the data and am
+	 * about to execute the function
 	 */
-	if (test_and_set_bit(smp_processor_id(), &call_data->started))
-		return;
+	mb();
+	atomic_inc(&call_data->started);
 	/*
 	 * At this point the info structure may be out of scope unless wait==1
 	 */
 	(*func)(info);
-	if (wait)
-		set_bit(smp_processor_id(), &call_data->finished);
+	if (wait) {
+		mb();
+		atomic_inc(&call_data->finished);
+	}
 }
@@ -47,6 +47,7 @@
 #include <linux/sched.h>
 #include <linux/fs.h>
 #include <linux/stat.h>
+#include <linux/pci.h>
 #include <linux/errno.h>
 #include <linux/file.h>
 #include <linux/ioctl.h>
@@ -55,6 +56,7 @@
 #include <linux/blkpg.h>
 #include <linux/slab.h>
 #include <linux/hdreg.h>
+#include <linux/spinlock.h>
 #include <linux/notifier.h>
 #include <linux/reboot.h>
@@ -78,9 +80,9 @@
 //#define DRIVERDEBUG
 #ifdef DRIVERDEBUG
-#define DEBUG( s )
+#define DEBUG( s ) printk( s )
 #else
-#define DEBUG( s ) printk( s )
+#define DEBUG( s )
 #endif
 
 /*
@@ -508,7 +510,13 @@ static void i2o_block_reply(struct i2o_handler *h, struct i2o_controller *c, str
 	u32 *m = (u32 *)msg;
 	u8 unit = (m[2]>>8)&0xF0;	/* low 4 bits are partition */
 	struct i2ob_device *dev = &i2ob_dev[(unit&0xF0)];
 
+	/*
+	 * Pull the lock over ready
+	 */
+	spin_lock_prefetch(&io_request_lock);
+
 	/*
 	 * FAILed message
 	 */
@@ -1140,7 +1148,7 @@ static int i2ob_ioctl(struct inode *inode, struct file *file,
 	dev = &i2ob_dev[minor];
 	switch (cmd) {
 		case BLKGETSIZE:
-			return put_user(i2ob[minor].nr_sects, (unsigned long *) arg);
+			return put_user(i2ob[minor].nr_sects, (long *) arg);
 		case BLKGETSIZE64:
 			return put_user((u64)i2ob[minor].nr_sects << 9, (u64 *)arg);
@@ -1197,6 +1205,9 @@ static int i2ob_release(struct inode *inode, struct file *file)
 	if(!dev->i2odev)
 		return 0;
 
+	/* Sync the device so we don't get errors */
+	fsync_dev(inode->i_rdev);
+
 	if (dev->refcnt <= 0)
 		printk(KERN_ALERT "i2ob_release: refcount(%d) <= 0\n", dev->refcnt);
 	dev->refcnt--;
@@ -1741,30 +1752,10 @@ void i2ob_del_device(struct i2o_controller *c, struct i2o_device *d)
 	}
 	spin_unlock_irqrestore(&io_request_lock, flags);
 
-	/*
-	 * Sync the device...this will force all outstanding I/Os
-	 * to attempt to complete, thus causing error messages.
-	 * We have to do this as the user could immediatelly create
-	 * a new volume that gets assigned the same minor number.
-	 * If there are still outstanding writes to the device,
-	 * that could cause data corruption on the new volume!
-	 *
-	 * The truth is that deleting a volume that you are currently
-	 * accessing will do _bad things_ to your system. This
-	 * handler will keep it from crashing, but must probably
-	 * you'll have to do a 'reboot' to get the system running
-	 * properly. Deleting disks you are using is dumb.
-	 * Umount them first and all will be good!
-	 *
-	 * It's not this driver's job to protect the system from
-	 * dumb user mistakes :)
-	 */
-	if(i2ob_dev[unit].refcnt)
-		fsync_dev(MKDEV(MAJOR_NR,unit));
-
 	/*
 	 * Decrease usage count for module
 	 */
 	while(i2ob_dev[unit].refcnt--)
 		MOD_DEC_USE_COUNT;
@@ -1986,10 +1977,11 @@ int i2o_block_init(void)
 EXPORT_NO_SYMBOLS;
 MODULE_AUTHOR("Red Hat Software");
 MODULE_DESCRIPTION("I2O Block Device OSM");
+MODULE_LICENSE("GPL");
 
 void cleanup_module(void)
 {
-	struct gendisk *gdp;
 	int i;
 
 	if(evt_running) {
......
@@ -961,5 +961,6 @@ void cleanup_module(void)
 EXPORT_NO_SYMBOLS;
 MODULE_AUTHOR("Red Hat Software");
 MODULE_DESCRIPTION("I2O Configuration");
+MODULE_LICENSE("GPL");
 
 #endif
@@ -246,8 +246,8 @@ static void i2o_core_reply(struct i2o_handler *h, struct i2o_controller *c,
 		/* Release the preserved msg by resubmitting it as a NOP */
-		preserved_msg[0] = THREE_WORD_MSG_SIZE | SGL_OFFSET_0;
-		preserved_msg[1] = I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | 0;
+		preserved_msg[0] = cpu_to_le32(THREE_WORD_MSG_SIZE | SGL_OFFSET_0);
+		preserved_msg[1] = cpu_to_le32(I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | 0);
 		preserved_msg[2] = 0;
 		i2o_post_message(c, msg[7]);
@@ -606,7 +606,10 @@ int i2o_delete_controller(struct i2o_controller *c)
 	up(&i2o_configuration_lock);
 
 	if(c->page_frame)
+	{
+		pci_unmap_single(c->pdev, c->page_frame_map, MSG_POOL_SIZE, PCI_DMA_FROMDEVICE);
 		kfree(c->page_frame);
+	}
 	if(c->hrt)
 		kfree(c->hrt);
 	if(c->lct)
@@ -1180,9 +1183,21 @@ void i2o_run_queue(struct i2o_controller *c)
 	while(mv!=0xFFFFFFFF)
 	{
 		struct i2o_handler *i;
-		m=(struct i2o_message *)bus_to_virt(mv);
+		/* Map the message from the page frame map to kernel virtual */
+		m=(struct i2o_message *)(mv - (unsigned long)c->page_frame_map + (unsigned long)c->page_frame);
 		msg=(u32*)m;
+
+		/*
+		 *	Ensure this message is seen coherently but cachably by
+		 *	the processor
+		 */
+		pci_dma_sync_single(c->pdev, c->page_frame_map, MSG_FRAME_SIZE, PCI_DMA_FROMDEVICE);
+
+		/*
+		 *	Despatch it
+		 */
 		i=i2o_handlers[m->initiator_context&(MAX_I2O_MODULES-1)];
 		if(i && i->reply)
 			i->reply(i,c,m);
@@ -1985,7 +2000,7 @@ static int i2o_systab_send(struct i2o_controller *iop)
 	msg[0] = I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6;
 	msg[1] = I2O_CMD_SYS_TAB_SET<<24 | HOST_TID<<12 | ADAPTER_TID;
 	msg[3] = 0;
-	msg[4] = (0<<16) | ((iop->unit+2) << 12); /* Host 0 IOP ID (unit + 2) */
+	msg[4] = (0<<16) | ((iop->unit+2) );      /* Host 0 IOP ID (unit + 2) */
 	msg[5] = 0;                               /* Segment 0 */
 
 	/*
@@ -2258,11 +2273,21 @@ int i2o_post_outbound_messages(struct i2o_controller *c)
 	c->page_frame = kmalloc(MSG_POOL_SIZE, GFP_KERNEL);
 	if(c->page_frame==NULL) {
-		printk(KERN_CRIT "%s: Outbound Q initialize failed; out of memory.\n",
+		printk(KERN_ERR "%s: Outbound Q initialize failed; out of memory.\n",
 			c->name);
 		return -ENOMEM;
 	}
-	m=virt_to_bus(c->page_frame);
+
+	c->page_frame_map = pci_map_single(c->pdev, c->page_frame, MSG_POOL_SIZE, PCI_DMA_FROMDEVICE);
+
+	if(c->page_frame_map == 0)
+	{
+		kfree(c->page_frame);
+		printk(KERN_ERR "%s: Unable to map outbound queue.\n", c->name);
+		return -ENOMEM;
+	}
+
+	m = c->page_frame_map;
 
 	/* Post frames */
@@ -3427,6 +3452,8 @@ EXPORT_SYMBOL(i2o_get_class_name);
 MODULE_AUTHOR("Red Hat Software");
 MODULE_DESCRIPTION("I2O Core");
+MODULE_LICENSE("GPL");
+
 
 int init_module(void)
......
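The i2o_core.c hunks above move the outbound message pool from virt_to_bus()/bus_to_virt() to the generic PCI DMA mapping API, dropping the assumption that bus and physical addresses coincide. Condensed from the hunks themselves, the lifecycle of the mapping is (setup in i2o_post_outbound_messages(), per-reply sync in i2o_run_queue(), teardown in i2o_delete_controller()):

	/* Setup: map the kmalloc'ed pool for device-to-host DMA. */
	c->page_frame_map = pci_map_single(c->pdev, c->page_frame,
				MSG_POOL_SIZE, PCI_DMA_FROMDEVICE);

	/* Per reply: sync the frame, then convert the IOP's bus address
	   back to a kernel virtual address by offsetting into the pool. */
	pci_dma_sync_single(c->pdev, c->page_frame_map,
				MSG_FRAME_SIZE, PCI_DMA_FROMDEVICE);
	m = (struct i2o_message *)(mv - (unsigned long)c->page_frame_map
				+ (unsigned long)c->page_frame);

	/* Teardown: unmap before freeing the pool. */
	pci_unmap_single(c->pdev, c->page_frame_map,
				MSG_POOL_SIZE, PCI_DMA_FROMDEVICE);
	kfree(c->page_frame);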
 /*
- *	drivers/message/i2o/i2o_lan.c
+ *	drivers/i2o/i2o_lan.c
  *
  *	I2O LAN CLASS OSM		May 26th 2000
  *
@@ -1564,6 +1564,8 @@ EXPORT_NO_SYMBOLS;
 MODULE_AUTHOR("University of Helsinki, Department of Computer Science");
 MODULE_DESCRIPTION("I2O Lan OSM");
+MODULE_LICENSE("GPL");
+
 MODULE_PARM(max_buckets_out, "1-" __MODULE_STRING(I2O_LAN_MAX_BUCKETS_OUT) "i");
 MODULE_PARM_DESC(max_buckets_out, "Total number of buckets to post (1-)");
......
@@ -219,7 +219,11 @@ int __init i2o_pci_install(struct pci_dev *dev)
 		printk(KERN_INFO "I2O: MTRR workaround for Intel i960 processor\n");
 		c->bus.pci.mtrr_reg1 = mtrr_add(c->mem_phys, 65536, MTRR_TYPE_UNCACHABLE, 1);
 		if(c->bus.pci.mtrr_reg1< 0)
+		{
 			printk(KERN_INFO "i2o_pci: Error in setting MTRR_TYPE_UNCACHABLE\n");
+			mtrr_del(c->bus.pci.mtrr_reg0, c->mem_phys, size);
+			c->bus.pci.mtrr_reg0 = -1;
+		}
 	}
 #endif
@@ -277,6 +281,14 @@ int __init i2o_pci_scan(void)
 	{
 		if((dev->class>>8)!=PCI_CLASS_INTELLIGENT_I2O)
 			continue;
+		if(dev->vendor == PCI_VENDOR_ID_DPT)
+		{
+			if(dev->device == 0xA501 || dev->device == 0xA511)
+			{
+				printk(KERN_INFO "i2o: Skipping Adaptec/DPT I2O raid with preferred native driver.\n");
+				continue;
+			}
+		}
 		if((dev->class&0xFF)>1)
 		{
 			printk(KERN_INFO "i2o: I2O Controller found but does not support I2O 1.5 (skipping).\n");
@@ -367,6 +379,8 @@ EXPORT_SYMBOL(i2o_pci_core_detach);
 MODULE_AUTHOR("Red Hat Software");
 MODULE_DESCRIPTION("I2O PCI Interface");
+MODULE_LICENSE("GPL");
+
 #else
 void __init i2o_pci_init(void)
......
@@ -299,14 +299,13 @@ static char* bus_strings[] =
 static spinlock_t i2o_proc_lock = SPIN_LOCK_UNLOCKED;
 
-int i2o_proc_read_hrt(char *buf, char **start, off_t offset, int len,
+int i2o_proc_read_hrt(char *buf, char **start, off_t offset, int count,
 		      int *eof, void *data)
 {
 	struct i2o_controller *c = (struct i2o_controller *)data;
 	i2o_hrt *hrt = (i2o_hrt *)c->hrt;
 	u32 bus;
-	int count;
-	int i;
+	int len, i;
 
 	spin_lock(&i2o_proc_lock);
@@ -320,9 +319,7 @@ int i2o_proc_read_hrt(char *buf, char **start, off_t offset, int len,
 		return len;
 	}
 
-	count = hrt->num_entries;
-
-	if((count * hrt->entry_len + 8) > 2048) {
+	if((hrt->num_entries * hrt->entry_len + 8) > 2048) {
 		printk(KERN_WARNING "i2o_proc: HRT does not fit into buffer\n");
 		len += sprintf(buf+len,
 			       "HRT table too big to fit in buffer.\n");
@@ -331,9 +328,9 @@ int i2o_proc_read_hrt(char *buf, char **start, off_t offset, int len,
 	}
 
 	len += sprintf(buf+len, "HRT has %d entries of %d bytes each.\n",
-		       count, hrt->entry_len << 2);
+		       hrt->num_entries, hrt->entry_len << 2);
 
-	for(i = 0; i < count; i++)
+	for(i = 0; i < hrt->num_entries && len < count; i++)
 	{
 		len += sprintf(buf+len, "Entry %d:\n", i);
 		len += sprintf(buf+len, "   Adapter ID: %0#10x\n",
@@ -3297,7 +3294,7 @@ void i2o_proc_remove_device(struct i2o_device *dev)
 void i2o_proc_dev_del(struct i2o_controller *c, struct i2o_device *d)
 {
 #ifdef DRIVERDEBUG
-	printk(KERN_INFO, "Deleting device %d from iop%d\n",
+	printk(KERN_INFO "Deleting device %d from iop%d\n",
 		d->lct_data.tid, c->unit);
 #endif
@@ -3365,6 +3362,7 @@ int __init i2o_proc_init(void)
 MODULE_AUTHOR("Deepak Saxena");
 MODULE_DESCRIPTION("I2O procfs Handler");
+MODULE_LICENSE("GPL");
 
 static void __exit i2o_proc_exit(void)
 {
......
@@ -41,6 +41,7 @@
 #include <linux/timer.h>
 #include <linux/delay.h>
 #include <linux/proc_fs.h>
+#include <linux/prefetch.h>
 #include <asm/dma.h>
 #include <asm/system.h>
 #include <asm/io.h>
@@ -152,7 +153,9 @@ static void i2o_scsi_reply(struct i2o_handler *h, struct i2o_controller *c, stru
 	Scsi_Cmnd *current_command;
 	u32 *m = (u32 *)msg;
 	u8 as,ds,st;
+
+	spin_lock_prefetch(&io_request_lock);
 
 	if(m[0] & (1<<13))
 	{
 		printk("IOP fail.\n");
@@ -201,7 +204,9 @@ static void i2o_scsi_reply(struct i2o_handler *h, struct i2o_controller *c, stru
 		}
 		return;
 	}
+
+	prefetchw(&queue_depth);
 
 	/*
 	 *	Low byte is device status, next is adapter status,
@@ -548,6 +553,11 @@ int i2o_scsi_queuecommand(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *))
 	host = SCpnt->host;
 	hostdata = (struct i2o_scsi_host *)host->hostdata;
+
+	c = hostdata->controller;
+	prefetch(c);
+	prefetchw(&queue_depth);
+
 	SCpnt->scsi_done = done;
 
 	if(SCpnt->target > 15)
@@ -575,7 +585,6 @@ int i2o_scsi_queuecommand(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *))
 	dprintk(("Real scsi messages.\n"));
 
-	c = hostdata->controller;
-
 	/*
 	 *	Obtain an I2O message. Right now we _have_ to obtain one
@@ -906,6 +915,8 @@ int i2o_scsi_bios_param(Disk * disk, kdev_t dev, int *ip)
 }
 
 MODULE_AUTHOR("Red Hat Software");
+MODULE_LICENSE("GPL");
+
 
 static Scsi_Host_Template driver_template = I2OSCSI;
......
@@ -78,9 +78,9 @@ static dpt_sig_S DPTI_sig = {
 	{'d', 'P', 't', 'S', 'i', 'G'}, SIG_VERSION,
 #ifdef __i386__
 	PROC_INTEL, PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM,
-#elif defined __ia64__
+#elif defined(__ia64__)
 	PROC_INTEL, PROC_IA64,
-#elif define __sparc__
+#elif defined(__sparc__)
 	PROC_ULTRASPARC,
 #elif defined(__alpha__)
 	PROC_ALPHA ,
@@ -1152,12 +1152,12 @@ static int adpt_i2o_post_wait(adpt_hba* pHba, u32* msg, int len, int timeout)
 	timeout *= HZ;
 	if((status = adpt_i2o_post_this(pHba, msg, len)) == 0){
 		if(!timeout){
-			current->state = TASK_INTERRUPTIBLE;
+			set_current_state(TASK_INTERRUPTIBLE);
 			spin_unlock_irq(&io_request_lock);
 			schedule();
 			spin_lock_irq(&io_request_lock);
 		} else {
-			current->state = TASK_INTERRUPTIBLE;
+			set_current_state(TASK_INTERRUPTIBLE);
 			spin_unlock_irq(&io_request_lock);
 			schedule_timeout(timeout*HZ);
 			spin_lock_irq(&io_request_lock);
@@ -1799,11 +1799,11 @@ static int adpt_system_info(void *buffer)
 #if defined __i386__
 	adpt_i386_info(&si);
-#elif defined __ia64__
+#elif defined (__ia64__)
 	adpt_ia64_info(&si);
-#elif define __sparc__
+#elif defined(__sparc__)
 	adpt_sparc_info(&si);
-#elif defined __alpha__
+#elif defined (__alpha__)
 	adpt_alpha_info(&si);
 #else
 	si.processorType = 0xff ;
......
@@ -66,10 +66,13 @@
  * I do not believe in debug levels as I never can guess what
  * part of the code is going to be problematic in the future.
  * Don't forget to run your klogd with -c 8.
+ *
+ * Example (do not remove):
+ * #define YMFDBG(fmt, arg...)  do{ printk(KERN_DEBUG fmt, ##arg); }while(0)
  */
-/* #define YMFDBG(fmt, arg...)  do{ printk(KERN_DEBUG fmt, ##arg); }while(0) */
-#define YMFDBGW(fmt, arg...)  /* */
-#define YMFDBGI(fmt, arg...)  /* */
+#define YMFDBGW(fmt, arg...)  /* */  /* write counts */
+#define YMFDBGI(fmt, arg...)  /* */  /* interrupts */
+#define YMFDBGX(fmt, arg...)  /* */  /* ioctl */
 
 static int ymf_playback_trigger(ymfpci_t *unit, struct ymf_pcm *ypcm, int cmd);
 static void ymf_capture_trigger(ymfpci_t *unit, struct ymf_pcm *ypcm, int cmd);
@@ -330,7 +333,7 @@ static int prog_dmabuf(struct ymf_state *state, int rec)
 	int w_16;
 	unsigned bufsize;
 	unsigned long flags;
-	int redzone;
+	int redzone, redfrags;
 	int ret;
 
 	w_16 = ymf_pcm_format_width(state->format.format) == 16;
@@ -352,36 +355,27 @@ static int prog_dmabuf(struct ymf_state *state, int rec)
 	 * Import what Doom might have set with SNDCTL_DSP_SETFRAGMENT.
 	 */
 	bufsize = PAGE_SIZE << dmabuf->buforder;
-	/* lets hand out reasonable big ass buffers by default */
-	dmabuf->fragshift = (dmabuf->buforder + PAGE_SHIFT -2);
+	/* By default we give 4 big buffers. */
+	dmabuf->fragshift = (dmabuf->buforder + PAGE_SHIFT - 2);
 	if (dmabuf->ossfragshift > 3 &&
 	    dmabuf->ossfragshift < dmabuf->fragshift) {
+		/* If OSS set smaller fragments, give more smaller buffers. */
 		dmabuf->fragshift = dmabuf->ossfragshift;
 	}
-	dmabuf->numfrag = bufsize >> dmabuf->fragshift;
-	while (dmabuf->numfrag < 4 && dmabuf->fragshift > 3) {
-		dmabuf->fragshift--;
-		dmabuf->numfrag = bufsize >> dmabuf->fragshift;
-	}
 	dmabuf->fragsize = 1 << dmabuf->fragshift;
-	dmabuf->dmasize = dmabuf->numfrag << dmabuf->fragshift;
 
-	if (dmabuf->ossmaxfrags >= 2 && dmabuf->ossmaxfrags < dmabuf->numfrag) {
-		dmabuf->numfrag = dmabuf->ossmaxfrags;
-		dmabuf->dmasize = dmabuf->numfrag << dmabuf->fragshift;
+	dmabuf->numfrag = bufsize >> dmabuf->fragshift;
+	dmabuf->dmasize = dmabuf->numfrag << dmabuf->fragshift;
 
+	if (dmabuf->ossmaxfrags >= 2) {
 		redzone = ymf_calc_lend(state->format.rate);
-		redzone <<= (state->format.shift + 1);
-		if (dmabuf->dmasize < redzone*3) {
-			/*
-			 * The driver works correctly with minimum dmasize
-			 * of redzone*2, but it produces stoppage and clicks.
-			 * So, make it little larger for smoother sound.
-			 * XXX Make dmasize a wholy divisible by fragsize.
-			 */
-//			printk(KERN_ERR "ymfpci: dmasize=%d < redzone=%d * 3\n",
-//			     dmabuf->dmasize, redzone);
-			dmabuf->dmasize = redzone*3;
+		redzone <<= state->format.shift;
+		redzone *= 3;
+		redfrags = (redzone + dmabuf->fragsize-1) >> dmabuf->fragshift;
+
+		if (dmabuf->ossmaxfrags + redfrags < dmabuf->numfrag) {
+			dmabuf->numfrag = dmabuf->ossmaxfrags + redfrags;
+			dmabuf->dmasize = dmabuf->numfrag << dmabuf->fragshift;
 		}
 	}
@@ -1440,7 +1434,7 @@ ymf_write(struct file *file, const char *buffer, size_t count, loff_t *ppos)
 	set_current_state(TASK_RUNNING);
 	remove_wait_queue(&dmabuf->wait, &waita);
 
-	YMFDBGW("ymf_write: dmabuf.count %d\n", dmabuf->count);
+	YMFDBGW("ymf_write: ret %d dmabuf.count %d\n", ret, dmabuf->count);
 	return ret;
 }
@@ -1794,6 +1788,7 @@ static int ymf_ioctl(struct inode *inode, struct file *file,
 	case SNDCTL_DSP_SETSYNCRO:
 	case SOUND_PCM_WRITE_FILTER:
 	case SOUND_PCM_READ_FILTER:
+		YMFDBGX("ymf_ioctl: cmd 0x%x unsupported\n", cmd);
 		return -ENOTTY;
 
 	default:
@@ -1802,7 +1797,8 @@ static int ymf_ioctl(struct inode *inode, struct file *file,
 		 * or perhaps they expect "universal" ioctls,
 		 * for instance we get SNDCTL_TMR_CONTINUE here.
 		 */
-		break;
+		YMFDBGX("ymf_ioctl: cmd 0x%x unknown\n", cmd);
+		break;
 	}
 	return -ENOTTY;
 }
......
@@ -112,15 +112,16 @@ union bdflush_param {
 		int dummy5;    /* unused */
 	} b_un;
 	unsigned int data[N_PARAM];
-} bdf_prm = {{30, 64, 64, 256, 5*HZ, 30*HZ, 60, 0, 0}};
+} bdf_prm = {{40, 0, 0, 0, 5*HZ, 30*HZ, 60, 0, 0}};
 
 /* These are the min and max parameter values that we will allow to be assigned */
 int bdflush_min[N_PARAM] = {  0,  10,    5,   25,  0,   1*HZ,   0, 0, 0};
 int bdflush_max[N_PARAM] = {100,50000, 20000, 20000,10000*HZ, 6000*HZ, 100, 0, 0};
 
-inline void unlock_buffer(struct buffer_head *bh)
+void unlock_buffer(struct buffer_head *bh)
 {
 	clear_bit(BH_Wait_IO, &bh->b_state);
+	clear_bit(BH_launder, &bh->b_state);
 	clear_bit(BH_Lock, &bh->b_state);
 	smp_mb__after_clear_bit();
 	if (waitqueue_active(&bh->b_wait))
@@ -2331,29 +2332,40 @@ static int grow_buffers(kdev_t dev, unsigned long block, int size)
 	return 1;
 }
 
-static int sync_page_buffers(struct buffer_head *bh, unsigned int gfp_mask)
+static int sync_page_buffers(struct buffer_head *head, unsigned int gfp_mask)
 {
-	struct buffer_head * p = bh;
-	int tryagain = 1;
+	struct buffer_head * bh = head;
+	int tryagain = 0;
 
 	do {
-		if (buffer_dirty(p) || buffer_locked(p)) {
-			if (test_and_set_bit(BH_Wait_IO, &p->b_state)) {
-				if (buffer_dirty(p)) {
-					ll_rw_block(WRITE, 1, &p);
-					tryagain = 0;
-				} else if (buffer_locked(p)) {
-					if (gfp_mask & __GFP_WAITBUF) {
-						wait_on_buffer(p);
-						tryagain = 1;
-					} else
-						tryagain = 0;
-				}
-			} else
-				tryagain = 0;
+		if (!buffer_dirty(bh) && !buffer_locked(bh))
+			continue;
+
+		/* Don't start IO first time around.. */
+		if (!test_and_set_bit(BH_Wait_IO, &bh->b_state))
+			continue;
+
+		/* Second time through we start actively writing out.. */
+		if (test_and_set_bit(BH_Lock, &bh->b_state)) {
+			if (!test_bit(BH_launder, &bh->b_state))
+				continue;
+			wait_on_buffer(bh);
+			tryagain = 1;
+			continue;
+		}
+
+		if (!atomic_set_buffer_clean(bh)) {
+			unlock_buffer(bh);
+			continue;
 		}
-		p = p->b_this_page;
-	} while (p != bh);
+
+		__mark_buffer_clean(bh);
+		get_bh(bh);
+		set_bit(BH_launder, &bh->b_state);
+		bh->b_end_io = end_buffer_io_sync;
+		submit_bh(WRITE, bh);
+		tryagain = 0;
+	} while ((bh = bh->b_this_page) != head);
 
 	return tryagain;
 }
......
@@ -1159,6 +1159,7 @@ EXPORT_NO_SYMBOLS;
  */
 MODULE_AUTHOR("Anton Altaparmakov <aia21@cus.cam.ac.uk>");
 MODULE_DESCRIPTION("Linux NTFS driver");
+MODULE_LICENSE("GPL");
 #ifdef DEBUG
 MODULE_PARM(ntdebug, "i");
 MODULE_PARM_DESC(ntdebug, "Debug level");
......
@@ -214,7 +214,8 @@ enum bh_state_bits {
 	BH_Mapped,	/* 1 if the buffer has a disk mapping */
 	BH_New,		/* 1 if the buffer is new and not yet written out */
 	BH_Async,	/* 1 if the buffer is under end_buffer_io_async I/O */
-	BH_Wait_IO,	/* 1 if we should throttle on this buffer */
+	BH_Wait_IO,	/* 1 if we should write out this buffer */
+	BH_launder,	/* 1 if we should throttle on this buffer */
 
 	BH_PrivateStart,/* not a state bit, but the first bit available
 			 * for private allocation by other entities
......
@@ -26,7 +26,7 @@ static inline void lock_buffer(struct buffer_head * bh)
 		__wait_on_buffer(bh);
 }
 
-extern void unlock_buffer(struct buffer_head *bh);
+extern void FASTCALL(unlock_buffer(struct buffer_head *bh));
 
 /*
  * super-block locking. Again, interrupts may only unlock
......
@@ -279,6 +279,7 @@ typedef struct page {
 #define PG_checked		12	/* kill me in 2.5.<early>. */
 #define PG_arch_1		13
 #define PG_reserved		14
+#define PG_launder		15	/* written out by VM pressure.. */
 
 /* Make it prettier to test the above... */
 #define Page_Uptodate(page)	test_bit(PG_uptodate, &(page)->flags)
@@ -292,6 +293,8 @@ typedef struct page {
 #define TryLockPage(page)	test_and_set_bit(PG_locked, &(page)->flags)
 #define PageChecked(page)	test_bit(PG_checked, &(page)->flags)
 #define SetPageChecked(page)	set_bit(PG_checked, &(page)->flags)
+#define PageLaunder(page)	test_bit(PG_launder, &(page)->flags)
+#define SetPageLaunder(page)	set_bit(PG_launder, &(page)->flags)
 
 extern void __set_page_dirty(struct page *);
@@ -308,6 +311,7 @@ static inline void set_page_dirty(struct page * page)
  * parallel wait_on_page).
  */
 #define UnlockPage(page)	do { \
+					clear_bit(PG_launder, &(page)->flags); \
 					smp_mb__before_clear_bit(); \
 					if (!test_and_clear_bit(PG_locked, &(page)->flags)) BUG(); \
 					smp_mb__after_clear_bit(); \
@@ -550,17 +554,16 @@ extern struct page *filemap_nopage(struct vm_area_struct *, unsigned long, int);
 #define __GFP_IO	0x40	/* Can start low memory physical IO? */
 #define __GFP_HIGHIO	0x80	/* Can start high mem physical IO? */
 #define __GFP_FS	0x100	/* Can call down to low-level FS? */
-#define __GFP_WAITBUF	0x200	/* Can we wait for buffers to complete? */
 
 #define GFP_NOHIGHIO	(__GFP_HIGH | __GFP_WAIT | __GFP_IO)
 #define GFP_NOIO	(__GFP_HIGH | __GFP_WAIT)
-#define GFP_NOFS	(__GFP_HIGH | __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_WAITBUF)
+#define GFP_NOFS	(__GFP_HIGH | __GFP_WAIT | __GFP_IO | __GFP_HIGHIO)
 #define GFP_ATOMIC	(__GFP_HIGH)
-#define GFP_USER	(             __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_WAITBUF | __GFP_FS)
-#define GFP_HIGHUSER	(             __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_WAITBUF | __GFP_FS | __GFP_HIGHMEM)
-#define GFP_KERNEL	(__GFP_HIGH | __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_WAITBUF | __GFP_FS)
-#define GFP_NFS		(__GFP_HIGH | __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_WAITBUF | __GFP_FS)
-#define GFP_KSWAPD	(             __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_WAITBUF | __GFP_FS)
+#define GFP_USER	(             __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_FS)
+#define GFP_HIGHUSER	(             __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_FS | __GFP_HIGHMEM)
+#define GFP_KERNEL	(__GFP_HIGH | __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_FS)
+#define GFP_NFS		(__GFP_HIGH | __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_FS)
+#define GFP_KSWAPD	(             __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_FS)
 
 /* Flag - indicates that the buffer will be suitable for DMA. Ignored on some
    platforms, used as appropriate on others */
......
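The PG_launder bit introduced above marks pages whose write-out was started by memory pressure, so reclaim can tell "locked because the VM is laundering it" apart from "locked for some other reason" and throttle only on the former. Condensed from the mm/vmscan.c hunks later in this commit, the throttle looks like:

	if (unlikely(TryLockPage(page))) {
		/* Only wait if the VM itself started this write-out, and
		   only if the allocation may block on the filesystem. */
		if (PageLaunder(page) && (gfp_mask & __GFP_FS)) {
			page_cache_get(page);
			spin_unlock(&pagemap_lru_lock);
			wait_on_page(page);
			page_cache_release(page);
			spin_lock(&pagemap_lru_lock);
		}
		continue;
	}

This is the "page write-out throttling" item from the changelog: under pressure, reclaimers block on in-flight write-out instead of pushing ever more pages at the disk.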
@@ -24,7 +24,7 @@ typedef struct kmem_cache_s kmem_cache_t;
 #define	SLAB_NFS		GFP_NFS
 #define	SLAB_DMA		GFP_DMA
 
-#define SLAB_LEVEL_MASK		(__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_HIGHIO|__GFP_WAITBUF|__GFP_FS)
+#define SLAB_LEVEL_MASK		(__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_HIGHIO|__GFP_FS)
 
 #define	SLAB_NO_GROW		0x00001000UL	/* don't grow a cache */
......
@@ -102,15 +102,13 @@ extern void FASTCALL(__lru_cache_del(struct page *));
 extern void FASTCALL(lru_cache_del(struct page *));
 
 extern void FASTCALL(deactivate_page(struct page *));
-extern void FASTCALL(deactivate_page_nolock(struct page *));
 extern void FASTCALL(activate_page(struct page *));
-extern void FASTCALL(activate_page_nolock(struct page *));
 
 extern void swap_setup(void);
 
 /* linux/mm/vmscan.c */
 extern wait_queue_head_t kswapd_wait;
-extern int FASTCALL(try_to_free_pages(unsigned int, unsigned int));
+extern int FASTCALL(try_to_free_pages(zone_t *, unsigned int, unsigned int));
 
 /* linux/mm/page_io.c */
 extern void rw_swap_page(int, struct page *);
......
@@ -149,28 +149,21 @@ static inline int has_stopped_jobs(int pgrp)
 }
 
 /*
- * When we die, we re-parent all our children.
- * Try to give them to another thread in our process
- * group, and if no such member exists, give it to
+ * When we die, we re-parent all our children to
  * the global child reaper process (ie "init")
  */
 static inline void forget_original_parent(struct task_struct * father)
 {
-	struct task_struct * p, *reaper;
+	struct task_struct * p;
 
 	read_lock(&tasklist_lock);
 
-	/* Next in our thread group */
-	reaper = next_thread(father);
-	if (reaper == father)
-		reaper = child_reaper;
-
 	for_each_task(p) {
 		if (p->p_opptr == father) {
 			/* We dont want people slaying init */
 			p->exit_signal = SIGCHLD;
 			p->self_exec_id++;
-			p->p_opptr = reaper;
+			p->p_opptr = child_reaper;
 			if (p->pdeath_signal) send_sig(p->pdeath_signal, p, 0);
 		}
 	}
......
@@ -327,7 +327,6 @@ struct page *alloc_bounce_page (void)
 	struct list_head *tmp;
 	struct page *page;
 
-repeat_alloc:
 	page = alloc_page(GFP_NOHIGHIO);
 	if (page)
 		return page;
@@ -337,6 +336,7 @@ struct page *alloc_bounce_page (void)
 	 */
 	wakeup_bdflush();
 
+repeat_alloc:
 	/*
 	 * Try to allocate from the emergency pool.
 	 */
@@ -365,7 +365,6 @@ struct buffer_head *alloc_bounce_bh (void)
 	struct list_head *tmp;
 	struct buffer_head *bh;
 
-repeat_alloc:
 	bh = kmem_cache_alloc(bh_cachep, SLAB_NOHIGHIO);
 	if (bh)
 		return bh;
@@ -375,6 +374,7 @@ struct buffer_head *alloc_bounce_bh (void)
 	 */
 	wakeup_bdflush();
 
+repeat_alloc:
 	/*
	 * Try to allocate from the emergency pool.
	 */
......
@@ -242,7 +242,7 @@ static struct page * balance_classzone(zone_t * classzone, unsigned int gfp_mask
 		current->allocation_order = order;
 		current->flags |= PF_MEMALLOC | PF_FREE_PAGES;
 
-		__freed = try_to_free_pages(gfp_mask, order);
+		__freed = try_to_free_pages(classzone, gfp_mask, order);
 
 		current->flags &= ~(PF_MEMALLOC | PF_FREE_PAGES);
@@ -467,20 +467,23 @@ unsigned int nr_free_buffer_pages (void)
 {
 	pg_data_t *pgdat = pgdat_list;
 	unsigned int sum = 0;
-	zonelist_t *zonelist;
-	zone_t **zonep, *zone;
 
 	do {
-		zonelist = pgdat->node_zonelists + (GFP_USER & GFP_ZONEMASK);
-		zonep = zonelist->zones;
+		zonelist_t *zonelist = pgdat->node_zonelists + (GFP_USER & GFP_ZONEMASK);
+		zone_t **zonep = zonelist->zones;
+		zone_t *zone;
 
-		for (zone = *zonep++; zone; zone = *zonep++)
-			sum += zone->free_pages;
+		for (zone = *zonep++; zone; zone = *zonep++) {
+			unsigned long size = zone->size;
+			unsigned long high = zone->pages_high;
+			if (size > high)
+				sum += size - high;
+		}
 
 		pgdat = pgdat->node_next;
 	} while (pgdat);
 
-	return sum + nr_active_pages + nr_inactive_pages;
+	return sum;
 }
 
 #if CONFIG_HIGHMEM
@@ -497,6 +500,8 @@ unsigned int nr_free_highpages (void)
 }
 #endif
 
+#define K(x) ((x) << (PAGE_SHIFT-10))
+
 /*
  * Show free area list (used inside shift_scroll-lock stuff)
  * We also calculate the percentage fragmentation. We do this by counting the
@@ -519,21 +524,17 @@ void show_free_areas_core(pg_data_t *pgdat)
 		printk("Zone:%s freepages:%6lukB min:%6luKB low:%6lukB "
 		       "high:%6lukB\n",
 			zone->name,
-			(zone->free_pages)
-			<< ((PAGE_SHIFT-10)),
-			zone->pages_min
-			<< ((PAGE_SHIFT-10)),
-			zone->pages_low
-			<< ((PAGE_SHIFT-10)),
-			zone->pages_high
-			<< ((PAGE_SHIFT-10)));
+			K(zone->free_pages),
+			K(zone->pages_min),
+			K(zone->pages_low),
+			K(zone->pages_high));
 		tmpdat = tmpdat->node_next;
 	}
 
 	printk("Free pages:      %6dkB (%6dkB HighMem)\n",
-		nr_free_pages() << (PAGE_SHIFT-10),
-		nr_free_highpages() << (PAGE_SHIFT-10));
+		K(nr_free_pages()),
+		K(nr_free_highpages()));
 
 	printk("( Active: %d, inactive: %d, free: %d )\n",
 		nr_active_pages,
@@ -564,7 +565,7 @@ void show_free_areas_core(pg_data_t *pgdat)
 		}
 		spin_unlock_irqrestore(&zone->lock, flags);
 	}
-	printk("= %lukB)\n", total * (PAGE_SIZE>>10));
+	printk("= %lukB)\n", K(total));
 }
 
 #ifdef SWAP_CACHE_INFO
......
@@ -48,7 +48,7 @@ pager_daemon_t pager_daemon = {
  * called on a page which is not on any of the lists, the
  * page is left alone.
  */
-void deactivate_page_nolock(struct page * page)
+static inline void deactivate_page_nolock(struct page * page)
 {
 	if (PageActive(page)) {
 		del_page_from_active_list(page);
@@ -66,7 +66,7 @@ void deactivate_page(struct page * page)
 /*
  * Move an inactive page to the active list.
  */
-void activate_page_nolock(struct page * page)
+static inline void activate_page_nolock(struct page * page)
 {
 	if (PageInactive(page)) {
 		del_page_from_inactive_list(page);
......
@@ -33,8 +33,6 @@
  */
 #define DEF_PRIORITY (6)
 
-#define page_zone_plenty(page) ((page)->zone->free_pages > (page)->zone->pages_high)
-
 /*
  * The swap-out function returns 1 if it successfully
  * scanned all the pages it was asked to (`count').
@@ -45,7 +43,7 @@
 
 /* mm->page_table_lock is held. mmap_sem is not held */
-static inline int try_to_swap_out(struct mm_struct * mm, struct vm_area_struct* vma, unsigned long address, pte_t * page_table, struct page *page)
+static inline int try_to_swap_out(struct mm_struct * mm, struct vm_area_struct* vma, unsigned long address, pte_t * page_table, struct page *page, zone_t * classzone)
 {
 	pte_t pte;
 	swp_entry_t entry;
@@ -53,11 +51,16 @@ static inline int try_to_swap_out(struct mm_struct * mm, struct vm_area_struct*
 	/* Don't look at this pte if it's been accessed recently. */
 	if (ptep_test_and_clear_young(page_table)) {
 		flush_tlb_page(vma, address);
+		mark_page_accessed(page);
 		return 0;
 	}
 
-	/* Don't bother replenishing zones that have tons of memory */
-	if (page_zone_plenty(page))
+	/* Don't bother unmapping pages that are active */
+	if (PageActive(page))
+		return 0;
+
+	/* Don't bother replenishing zones not under pressure.. */
+	if (!memclass(page->zone, classzone))
 		return 0;
 
 	if (TryLockPage(page))
@@ -146,7 +149,7 @@ static inline int try_to_swap_out(struct mm_struct * mm, struct vm_area_struct*
 }
 
 /* mm->page_table_lock is held. mmap_sem is not held */
-static inline int swap_out_pmd(struct mm_struct * mm, struct vm_area_struct * vma, pmd_t *dir, unsigned long address, unsigned long end, int count)
+static inline int swap_out_pmd(struct mm_struct * mm, struct vm_area_struct * vma, pmd_t *dir, unsigned long address, unsigned long end, int count, zone_t * classzone)
 {
 	pte_t * pte;
 	unsigned long pmd_end;
@@ -170,7 +173,7 @@ static inline int swap_out_pmd(struct mm_struct * mm, struct vm_area_struct * vm
 			struct page *page = pte_page(*pte);
 
 			if (VALID_PAGE(page) && !PageReserved(page)) {
-				count -= try_to_swap_out(mm, vma, address, pte, page);
+				count -= try_to_swap_out(mm, vma, address, pte, page, classzone);
 				if (!count) {
 					address += PAGE_SIZE;
 					break;
@@ -185,7 +188,7 @@ static inline int swap_out_pmd(struct mm_struct * mm, struct vm_area_struct * vm
 }
 
 /* mm->page_table_lock is held. mmap_sem is not held */
-static inline int swap_out_pgd(struct mm_struct * mm, struct vm_area_struct * vma, pgd_t *dir, unsigned long address, unsigned long end, int count)
+static inline int swap_out_pgd(struct mm_struct * mm, struct vm_area_struct * vma, pgd_t *dir, unsigned long address, unsigned long end, int count, zone_t * classzone)
 {
 	pmd_t * pmd;
 	unsigned long pgd_end;
@@ -205,7 +208,7 @@ static inline int swap_out_pgd(struct mm_struct * mm, struct vm_area_struct * vm
 		end = pgd_end;
 
 	do {
-		count = swap_out_pmd(mm, vma, pmd, address, end, count);
+		count = swap_out_pmd(mm, vma, pmd, address, end, count, classzone);
 		if (!count)
 			break;
 		address = (address + PMD_SIZE) & PMD_MASK;
@@ -215,7 +218,7 @@ static inline int swap_out_pgd(struct mm_struct * mm, struct vm_area_struct * vm
 }
 
 /* mm->page_table_lock is held. mmap_sem is not held */
-static inline int swap_out_vma(struct mm_struct * mm, struct vm_area_struct * vma, unsigned long address, int count)
+static inline int swap_out_vma(struct mm_struct * mm, struct vm_area_struct * vma, unsigned long address, int count, zone_t * classzone)
 {
 	pgd_t *pgdir;
 	unsigned long end;
@@ -230,7 +233,7 @@ static inline int swap_out_vma(struct mm_struct * mm, struct vm_area_struct * vm
 	if (address >= end)
 		BUG();
 	do {
-		count = swap_out_pgd(mm, vma, pgdir, address, end, count);
+		count = swap_out_pgd(mm, vma, pgdir, address, end, count, classzone);
 		if (!count)
 			break;
 		address = (address + PGDIR_SIZE) & PGDIR_MASK;
@@ -245,7 +248,7 @@ struct mm_struct *swap_mm = &init_mm;
 /*
  * Returns remaining count of pages to be swapped out by followup call.
  */
-static inline int swap_out_mm(struct mm_struct * mm, int count, int * mmcounter)
+static inline int swap_out_mm(struct mm_struct * mm, int count, int * mmcounter, zone_t * classzone)
 {
 	unsigned long address;
 	struct vm_area_struct* vma;
@@ -267,7 +270,7 @@ static inline int swap_out_mm(struct mm_struct * mm, int count, int * mmcounter)
 		address = vma->vm_start;
 
 		for (;;) {
-			count = swap_out_vma(mm, vma, address, count);
+			count = swap_out_vma(mm, vma, address, count, classzone);
 			vma = vma->vm_next;
 			if (!vma)
 				break;
@@ -284,10 +287,10 @@ static inline int swap_out_mm(struct mm_struct * mm, int count, int * mmcounter)
 	return count;
 }
 
-static int FASTCALL(swap_out(unsigned int priority, unsigned int gfp_mask, int nr_pages));
-static int swap_out(unsigned int priority, unsigned int gfp_mask, int nr_pages)
+static int FASTCALL(swap_out(unsigned int priority, unsigned int gfp_mask, zone_t * classzone));
+static int swap_out(unsigned int priority, unsigned int gfp_mask, zone_t * classzone)
 {
-	int counter;
+	int counter, nr_pages = SWAP_CLUSTER_MAX;
 	struct mm_struct *mm;
 
 	/* Then, look at the other mm's */
@@ -312,7 +315,7 @@ static int swap_out(unsigned int priority, unsigned int gfp_mask, int nr_pages)
 		atomic_inc(&mm->mm_users);
 		spin_unlock(&mmlist_lock);
 
-		nr_pages = swap_out_mm(mm, nr_pages, &counter);
+		nr_pages = swap_out_mm(mm, nr_pages, &counter, classzone);
 
 		mmput(mm);
@@ -327,13 +330,13 @@ static int swap_out(unsigned int priority, unsigned int gfp_mask, int nr_pages)
 	return 0;
 }
 
-static int FASTCALL(shrink_cache(int nr_pages, int max_scan, unsigned int gfp_mask));
-static int shrink_cache(int nr_pages, int max_scan, unsigned int gfp_mask)
+static int FASTCALL(shrink_cache(int nr_pages, int max_mapped, zone_t * classzone, unsigned int gfp_mask));
+static int shrink_cache(int nr_pages, int max_mapped, zone_t * classzone, unsigned int gfp_mask)
 {
 	struct list_head * entry;
 
 	spin_lock(&pagemap_lru_lock);
-	while (max_scan && (entry = inactive_list.prev) != &inactive_list) {
+	while (max_mapped && (entry = inactive_list.prev) != &inactive_list) {
 		struct page * page;
 
 		if (unlikely(current->need_resched)) {
list_del(entry); list_del(entry);
list_add(entry, &inactive_list); list_add(entry, &inactive_list);
if (PageTestandClearReferenced(page))
continue;
max_scan--; if (!memclass(page->zone, classzone))
if (unlikely(page_zone_plenty(page)))
continue; continue;
/* Racy check to avoid trylocking when not worthwhile */ /* Racy check to avoid trylocking when not worthwhile */
if (!page->buffers && page_count(page) != 1) if (!page->buffers && page_count(page) != 1)
continue; goto page_mapped;
/* /*
* The page is locked. IO in progress? * The page is locked. IO in progress?
* Move it to the back of the list. * Move it to the back of the list.
*/ */
if (unlikely(TryLockPage(page))) if (unlikely(TryLockPage(page))) {
if (PageLaunder(page) && (gfp_mask & __GFP_FS)) {
page_cache_get(page);
spin_unlock(&pagemap_lru_lock);
wait_on_page(page);
page_cache_release(page);
spin_lock(&pagemap_lru_lock);
}
continue; continue;
}
if (PageDirty(page) && is_page_cache_freeable(page) && page->mapping) { if (PageDirty(page) && is_page_cache_freeable(page) && page->mapping) {
/* /*
@@ -383,6 +391,7 @@ static int shrink_cache(int nr_pages, int max_mapped, zone_t * classzone, unsign
 			writepage = page->mapping->a_ops->writepage;
 			if ((gfp_mask & __GFP_FS) && writepage) {
 				ClearPageDirty(page);
+				SetPageLaunder(page);
 				page_cache_get(page);
 				spin_unlock(&pagemap_lru_lock);
@@ -462,7 +471,10 @@ static int shrink_cache(int nr_pages, int max_mapped, zone_t * classzone, unsign
 		if (!is_page_cache_freeable(page) || PageDirty(page)) {
 			spin_unlock(&pagecache_lock);
 			UnlockPage(page);
-			continue;
+page_mapped:
+			if (--max_mapped)
+				continue;
+			break;
 		}
 
 		/* point of no return */
@@ -522,8 +534,8 @@ static void refill_inactive(int nr_pages)
 	spin_unlock(&pagemap_lru_lock);
 }
 
-static int FASTCALL(shrink_caches(int priority, unsigned int gfp_mask, int nr_pages));
-static int shrink_caches(int priority, unsigned int gfp_mask, int nr_pages)
+static int FASTCALL(shrink_caches(zone_t * classzone, int priority, unsigned int gfp_mask, int nr_pages));
+static int shrink_caches(zone_t * classzone, int priority, unsigned int gfp_mask, int nr_pages)
 {
 	int max_scan;
 	int chunk_size = nr_pages;
@@ -539,7 +551,7 @@ static int shrink_caches(zone_t * classzone, int priority, unsigned int gfp_mask
 	refill_inactive(ratio);
 
 	max_scan = nr_inactive_pages / priority;
-	nr_pages = shrink_cache(nr_pages, max_scan, gfp_mask);
+	nr_pages = shrink_cache(nr_pages, max_scan, classzone, gfp_mask);
 	if (nr_pages <= 0)
 		return 0;
@@ -552,18 +564,18 @@ static int shrink_caches(zone_t * classzone, int priority, unsigned int gfp_mask
 	return nr_pages;
 }
 
-int try_to_free_pages(unsigned int gfp_mask, unsigned int order)
+int try_to_free_pages(zone_t *classzone, unsigned int gfp_mask, unsigned int order)
 {
 	int ret = 0;
 	int priority = DEF_PRIORITY;
 	int nr_pages = SWAP_CLUSTER_MAX;
 
 	do {
-		nr_pages = shrink_caches(priority, gfp_mask, nr_pages);
+		nr_pages = shrink_caches(classzone, priority, gfp_mask, nr_pages);
 		if (nr_pages <= 0)
 			return 1;
 
-		ret |= swap_out(priority, gfp_mask, SWAP_CLUSTER_MAX << 2);
+		ret |= swap_out(priority, gfp_mask, classzone);
 	} while (--priority);
 
 	return ret;
@@ -595,7 +607,7 @@ static int kswapd_balance_pgdat(pg_data_t * pgdat)
 			schedule();
 		if (!zone->need_balance)
 			continue;
-		if (!try_to_free_pages(GFP_KSWAPD, 0)) {
+		if (!try_to_free_pages(zone, GFP_KSWAPD, 0)) {
 			zone->need_balance = 0;
 			__set_current_state(TASK_INTERRUPTIBLE);
 			schedule_timeout(HZ);
......
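With try_to_free_pages() now taking the zone being balanced, both the LRU scan and swap-out apply memclass(page->zone, classzone) and leave pages alone unless freeing them would relieve that particular zone. A kswapd-style caller, modelled on the kswapd_balance_pgdat() hunk above (the function name here is illustrative):

	static void balance_one_zone(zone_t *zone)
	{
		if (!zone->need_balance)
			return;
		/* Reclaim targeted at this zone only; if no progress can
		   be made, back off instead of spinning on the zone. */
		if (!try_to_free_pages(zone, GFP_KSWAPD, 0))
			zone->need_balance = 0;
	}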