Commit a93be803 authored by Linus Torvalds's avatar Linus Torvalds

Linux 2.1.127pre2

I just found a case that could certainly result in endless page faults,
and an endless stream of __get_free_page() calls. It's been there forever,
and I basically thought it could never happen, but thinking about it some
more it can happen a lot more easily than I thought.

The problem is that the page fault handling code will give up if it cannot
allocate a page table entry. We have code in place to handle the final
page allocation failure, but the "mid-way" failures just failed, and
caused the page fault to be done over and over again.

More importantly, this could happen from kernel mode when a system call
was trying to fill in a user page, in which case it wouldn't even be
interruptible.

It's really unlikely to happen (because the page tables tend to be set up
already), but I suspect it can be triggered by execve'ing a new process
which is not going to have any existing page tables. Even then we're
likely to have old pages available (the ones we free'd from the previous
process), but at least it doesn't sound impossible that this could be a
problem.

I've not seen this behaviour myself, but it could have caused Andrea's
problems, especially the harder to find ones. Andrea, can you check this
patch (against clean 2.1.126) out and see if it makes any difference to
your testing?

(Right now it does the wrong error code: it will cause a SIGSEGV instead
of a SIGBUS when we run out of memory, but that's a small detail).
Essentially, instead of trying to call "oom()" and sending a signal (which
doesn't work for kernel level accesses anyway), the code returns the
proper return value from handle_mm_fault(), which allows the caller to do
the right thing (which can include following the exception tables). That
way we can handle the case of running out of memory from a kernel mode
access too..

(This is also why the fault gets the wrong signal - I didn't bother to fix
up the x86 fault handler all that much ;)
Btw, the reason I'm sending out these patches in emails instead of just
putting them on ftp.kernel.org is that the machine has had disk problems
for the last week, and finally gave up completely last Friday or so. So
ftp.kernel.org is down until we have a new raid array or the old one
magically recovers.  Sorry about the spamming.

                Linus
parent d7cc008e
...@@ -151,7 +151,7 @@ int main(int argc, char ** argv) ...@@ -151,7 +151,7 @@ int main(int argc, char ** argv)
if (setup_sectors < SETUP_SECTS) if (setup_sectors < SETUP_SECTS)
setup_sectors = SETUP_SECTS; setup_sectors = SETUP_SECTS;
fprintf(stderr, "Setup is %d bytes.\n", i); fprintf(stderr, "Setup is %d bytes.\n", i);
memset(buf, sizeof(buf), 0); memset(buf, 0, sizeof(buf));
while (i < setup_sectors * 512) { while (i < setup_sectors * 512) {
c = setup_sectors * 512 - i; c = setup_sectors * 512 - i;
if (c > sizeof(buf)) if (c > sizeof(buf))
......
...@@ -156,7 +156,14 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code) ...@@ -156,7 +156,14 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code)
if (!(vma->vm_flags & (VM_READ | VM_EXEC))) if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
goto bad_area; goto bad_area;
} }
handle_mm_fault(tsk, vma, address, write);
/*
* If for any reason at all we couldn't handle the fault,
* make sure we exit gracefully rather than endlessly redo
* the fault.
*/
if (!handle_mm_fault(tsk, vma, address, write))
goto bad_area;
/* /*
* Did it hit the DOS screen memory VA from vm86 mode? * Did it hit the DOS screen memory VA from vm86 mode?
......
...@@ -5,7 +5,6 @@ ...@@ -5,7 +5,6 @@
* Copyright (C) 1997 Ralf Baechle (ralf@gnu.org), * Copyright (C) 1997 Ralf Baechle (ralf@gnu.org),
* derived from r4xx0.c by David S. Miller (dm@engr.sgi.com). * derived from r4xx0.c by David S. Miller (dm@engr.sgi.com).
*/ */
#include <linux/config.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/sched.h> #include <linux/sched.h>
......
...@@ -5,6 +5,7 @@ ...@@ -5,6 +5,7 @@
* Copyright (C) 1996 David S. Miller (dm@engr.sgi.com) * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
* Copyright (C) 1997, 1998 Ralf Baechle (ralf@gnu.org) * Copyright (C) 1997, 1998 Ralf Baechle (ralf@gnu.org)
*/ */
#include <linux/config.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/kbd_ll.h> #include <linux/kbd_ll.h>
#include <linux/kernel.h> #include <linux/kernel.h>
......
...@@ -27,7 +27,6 @@ ...@@ -27,7 +27,6 @@
#include <linux/module.h> #include <linux/module.h>
#include <linux/config.h>
#include <linux/sched.h> #include <linux/sched.h>
#include <linux/fs.h> #include <linux/fs.h>
#include <linux/file.h> #include <linux/file.h>
......
...@@ -7,6 +7,8 @@ ...@@ -7,6 +7,8 @@
* Andrea Arcangeli * Andrea Arcangeli
* *
* based on work by Grant Guenther <grant@torque.net> and Phil Blundell. * based on work by Grant Guenther <grant@torque.net> and Phil Blundell.
*
* Cleaned up include files - Russell King <linux@arm.uk.linux.org>
*/ */
/* This driver should work with any hardware that is broadly compatible /* This driver should work with any hardware that is broadly compatible
......
...@@ -8,16 +8,11 @@ ...@@ -8,16 +8,11 @@
* *
* based on work by Grant Guenther <grant@torque.net> * based on work by Grant Guenther <grant@torque.net>
* and Philip Blundell * and Philip Blundell
*
* Cleaned up include files - Russell King <linux@arm.uk.linux.org>
*/ */
#include <linux/stddef.h> #include <linux/sched.h>
#include <linux/tasks.h>
#include <linux/ctype.h>
#include <asm/ptrace.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <asm/irq.h>
#include <linux/delay.h> #include <linux/delay.h>
#include <linux/errno.h> #include <linux/errno.h>
#include <linux/interrupt.h> #include <linux/interrupt.h>
...@@ -26,6 +21,11 @@ ...@@ -26,6 +21,11 @@
#include <linux/malloc.h> #include <linux/malloc.h>
#include <linux/proc_fs.h> #include <linux/proc_fs.h>
#include <linux/parport.h> #include <linux/parport.h>
#include <linux/ctype.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <asm/irq.h>
struct proc_dir_entry *base = NULL; struct proc_dir_entry *base = NULL;
......
...@@ -105,7 +105,7 @@ struct parport *parport_register_port(unsigned long base, int irq, int dma, ...@@ -105,7 +105,7 @@ struct parport *parport_register_port(unsigned long base, int irq, int dma,
tmp->ops = ops; tmp->ops = ops;
tmp->number = portnum; tmp->number = portnum;
memset (&tmp->probe_info, 0, sizeof (struct parport_device_info)); memset (&tmp->probe_info, 0, sizeof (struct parport_device_info));
spin_lock_init(&tmp->cad_lock); tmp->cad_lock = RW_LOCK_UNLOCKED;
spin_lock_init(&tmp->waitlist_lock); spin_lock_init(&tmp->waitlist_lock);
spin_lock_init(&tmp->pardevice_lock); spin_lock_init(&tmp->pardevice_lock);
......
...@@ -34,6 +34,7 @@ ...@@ -34,6 +34,7 @@
v1.10 4/21/97 Fixed module code so that multiple cards may be detected, v1.10 4/21/97 Fixed module code so that multiple cards may be detected,
other cleanups. -djb other cleanups. -djb
Andrea Arcangeli: Upgraded to Donald Becker's version 1.12. Andrea Arcangeli: Upgraded to Donald Becker's version 1.12.
Rick Payne: Fixed SMP race condition
*/ */
static char *version = "3c509.c:1.12 6/4/97 becker@cesdis.gsfc.nasa.gov\n"; static char *version = "3c509.c:1.12 6/4/97 becker@cesdis.gsfc.nasa.gov\n";
...@@ -59,6 +60,7 @@ static char *version = "3c509.c:1.12 6/4/97 becker@cesdis.gsfc.nasa.gov\n"; ...@@ -59,6 +60,7 @@ static char *version = "3c509.c:1.12 6/4/97 becker@cesdis.gsfc.nasa.gov\n";
#include <linux/skbuff.h> #include <linux/skbuff.h>
#include <linux/delay.h> /* for udelay() */ #include <linux/delay.h> /* for udelay() */
#include <asm/spinlock.h>
#include <asm/bitops.h> #include <asm/bitops.h>
#include <asm/io.h> #include <asm/io.h>
...@@ -122,6 +124,7 @@ enum RxFilter { ...@@ -122,6 +124,7 @@ enum RxFilter {
struct el3_private { struct el3_private {
struct enet_statistics stats; struct enet_statistics stats;
struct device *next_dev; struct device *next_dev;
spinlock_t lock;
/* skb send-queue */ /* skb send-queue */
int head, size; int head, size;
struct sk_buff *queue[SKB_QUEUE_SIZE]; struct sk_buff *queue[SKB_QUEUE_SIZE];
...@@ -401,6 +404,9 @@ el3_open(struct device *dev) ...@@ -401,6 +404,9 @@ el3_open(struct device *dev)
outw(RxReset, ioaddr + EL3_CMD); outw(RxReset, ioaddr + EL3_CMD);
outw(SetStatusEnb | 0x00, ioaddr + EL3_CMD); outw(SetStatusEnb | 0x00, ioaddr + EL3_CMD);
/* Set the spinlock before grabbing IRQ! */
((struct el3_private *)dev->priv)->lock = (spinlock_t) SPIN_LOCK_UNLOCKED;
if (request_irq(dev->irq, &el3_interrupt, 0, "3c509", dev)) { if (request_irq(dev->irq, &el3_interrupt, 0, "3c509", dev)) {
return -EAGAIN; return -EAGAIN;
} }
...@@ -520,6 +526,11 @@ el3_start_xmit(struct sk_buff *skb, struct device *dev) ...@@ -520,6 +526,11 @@ el3_start_xmit(struct sk_buff *skb, struct device *dev)
if (test_and_set_bit(0, (void*)&dev->tbusy) != 0) if (test_and_set_bit(0, (void*)&dev->tbusy) != 0)
printk("%s: Transmitter access conflict.\n", dev->name); printk("%s: Transmitter access conflict.\n", dev->name);
else { else {
unsigned long flags;
/* Spin on the lock, until we're clear of an IRQ */
spin_lock_irqsave(&lp->lock, flags);
/* Put out the doubleword header... */ /* Put out the doubleword header... */
outw(skb->len, ioaddr + TX_FIFO); outw(skb->len, ioaddr + TX_FIFO);
outw(0x00, ioaddr + TX_FIFO); outw(0x00, ioaddr + TX_FIFO);
...@@ -536,6 +547,8 @@ el3_start_xmit(struct sk_buff *skb, struct device *dev) ...@@ -536,6 +547,8 @@ el3_start_xmit(struct sk_buff *skb, struct device *dev)
} else } else
/* Interrupt us when the FIFO has room for max-sized packet. */ /* Interrupt us when the FIFO has room for max-sized packet. */
outw(SetTxThreshold + 1536, ioaddr + EL3_CMD); outw(SetTxThreshold + 1536, ioaddr + EL3_CMD);
spin_unlock_irqrestore(&lp->lock, flags);
} }
dev_kfree_skb (skb); dev_kfree_skb (skb);
...@@ -560,6 +573,7 @@ static void ...@@ -560,6 +573,7 @@ static void
el3_interrupt(int irq, void *dev_id, struct pt_regs *regs) el3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{ {
struct device *dev = (struct device *)dev_id; struct device *dev = (struct device *)dev_id;
struct el3_private *lp;
int ioaddr, status; int ioaddr, status;
int i = INTR_WORK; int i = INTR_WORK;
...@@ -568,6 +582,9 @@ el3_interrupt(int irq, void *dev_id, struct pt_regs *regs) ...@@ -568,6 +582,9 @@ el3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
return; return;
} }
lp = (struct el3_private *)dev->priv;
spin_lock(&lp->lock);
if (dev->interrupt) if (dev->interrupt)
printk("%s: Re-entering the interrupt handler.\n", dev->name); printk("%s: Re-entering the interrupt handler.\n", dev->name);
dev->interrupt = 1; dev->interrupt = 1;
...@@ -629,7 +646,7 @@ el3_interrupt(int irq, void *dev_id, struct pt_regs *regs) ...@@ -629,7 +646,7 @@ el3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
printk("%s: exiting interrupt, status %4.4x.\n", dev->name, printk("%s: exiting interrupt, status %4.4x.\n", dev->name,
inw(ioaddr + EL3_STATUS)); inw(ioaddr + EL3_STATUS));
} }
spin_unlock(&lp->lock);
dev->interrupt = 0; dev->interrupt = 0;
return; return;
} }
......
...@@ -55,7 +55,6 @@ static const char *version = ...@@ -55,7 +55,6 @@ static const char *version =
"ne2.c:v0.90 Oct 14 1998 David Weinehall <tao@acc.umu.se>\n"; "ne2.c:v0.90 Oct 14 1998 David Weinehall <tao@acc.umu.se>\n";
#include <linux/module.h> #include <linux/module.h>
#include <linux/config.h>
#include <linux/version.h> #include <linux/version.h>
#include <linux/kernel.h> #include <linux/kernel.h>
......
Wed Oct 21 21:00 1998 Gerard Roudier (groudier@club-internet.fr)
* revision 3.1a
- Changes from Eddie Dost for Sparc and Alpha:
ioremap/iounmap support for Sparc.
pcivtophys changed to bus_dvma_to_phys.
	- Add the 53c876 description to the chip table. This is only useful
	  for printing the right name of the controller.
- DEL-441 Item 2 work-around for the 53c876 rev <= 5 (0x15).
	- Add additional checking of INQUIRY data:
	  Check INQUIRY data received length is at least 7. Byte 7 of
	  inquiry data contains device features bits and the driver might
	  be confused by garbage. Also check peripheral qualifier.
- Cleanup of the SCSI tasks management:
Remove the special case for 32 tags. Now the driver only uses the
scheme that allows up to 64 tags per LUN.
Merge some code from the 896 driver.
	  Use a 1,3,5,...MAXTAGS*2+1 tag numbering. Previous driver could
	  use any tag number from 1 to 253 and some non-conformant devices
	  might have problems with large tag numbers.
- 'no_sync' changed to 'no_disc' in the README file. This is an old
and trivial mistake that seems to demonstrate the README file is
not often read. :)
Sun Oct 4 14:00 1998 Gerard Roudier (groudier@club-internet.fr) Sun Oct 4 14:00 1998 Gerard Roudier (groudier@club-internet.fr)
* revision 3.0i * revision 3.0i
- Cosmetic changes for sparc (but not for the driver) that needs - Cosmetic changes for sparc (but not for the driver) that needs
......
...@@ -4,7 +4,7 @@ Written by Gerard Roudier <groudier@club-internet.fr> ...@@ -4,7 +4,7 @@ Written by Gerard Roudier <groudier@club-internet.fr>
21 Rue Carnot 21 Rue Carnot
95170 DEUIL LA BARRE - FRANCE 95170 DEUIL LA BARRE - FRANCE
27 June 1998 18 October 1998
=============================================================================== ===============================================================================
1. Introduction 1. Introduction
...@@ -21,7 +21,7 @@ Written by Gerard Roudier <groudier@club-internet.fr> ...@@ -21,7 +21,7 @@ Written by Gerard Roudier <groudier@club-internet.fr>
8.4 Set order type for tagged command 8.4 Set order type for tagged command
8.5 Set debug mode 8.5 Set debug mode
8.6 Clear profile counters 8.6 Clear profile counters
8.7 Set flag (no_sync) 8.7 Set flag (no_disc)
8.8 Set verbose level 8.8 Set verbose level
9. Configuration parameters 9. Configuration parameters
10. Boot setup commands 10. Boot setup commands
...@@ -424,7 +424,7 @@ Available commands: ...@@ -424,7 +424,7 @@ Available commands:
The "clearprof" command allows you to clear these counters at any time. The "clearprof" command allows you to clear these counters at any time.
8.7 Set flag (no_sync) 8.7 Set flag (no_disc)
setflag <target> <flag> setflag <target> <flag>
...@@ -432,11 +432,11 @@ Available commands: ...@@ -432,11 +432,11 @@ Available commands:
For the moment, only one flag is available: For the moment, only one flag is available:
no_sync: not allow target to disconnect. no_disc: not allow target to disconnect.
Do not specify any flag in order to reset the flag. For example: Do not specify any flag in order to reset the flag. For example:
- setflag 4 - setflag 4
will reset no_sync flag for target 4, so will allow it disconnections. will reset no_disc flag for target 4, so will allow it disconnections.
- setflag all - setflag all
will allow disconnection for all devices on the SCSI bus. will allow disconnection for all devices on the SCSI bus.
...@@ -1067,7 +1067,7 @@ Try to enable one feature at a time with control commands. For example: ...@@ -1067,7 +1067,7 @@ Try to enable one feature at a time with control commands. For example:
Will enable fast synchronous data transfer negotiation for all targets. Will enable fast synchronous data transfer negotiation for all targets.
- echo "setflag 3" >/proc/scsi/ncr53c8xx/0 - echo "setflag 3" >/proc/scsi/ncr53c8xx/0
Will reset flags (no_sync) for target 3, and so will allow it to disconnect Will reset flags (no_disc) for target 3, and so will allow it to disconnect
the SCSI Bus. the SCSI Bus.
- echo "settags 3 8" >/proc/scsi/ncr53c8xx/0 - echo "settags 3 8" >/proc/scsi/ncr53c8xx/0
......
...@@ -73,7 +73,7 @@ ...@@ -73,7 +73,7 @@
*/ */
/* /*
** October 4 1998, version 3.0i ** October 21 1998, version 3.1a
** **
** Supported SCSI-II features: ** Supported SCSI-II features:
** Synchronous negotiation ** Synchronous negotiation
...@@ -169,9 +169,11 @@ ...@@ -169,9 +169,11 @@
#endif #endif
/* /*
** Define the BSD style u_int32 type ** Define the BSD style u_int32 and u_int64 type.
** Are in fact u_int32_t and u_int64_t :-)
*/ */
typedef u32 u_int32; typedef u32 u_int32;
typedef u64 u_int64;
#include "ncr53c8xx.h" #include "ncr53c8xx.h"
...@@ -366,25 +368,14 @@ static inline struct xpt_quehead *xpt_remque_tail(struct xpt_quehead *head) ...@@ -366,25 +368,14 @@ static inline struct xpt_quehead *xpt_remque_tail(struct xpt_quehead *head)
#define NO_TAG (255) #define NO_TAG (255)
/* /*
** For more than 32 TAGS support, we do some address calculation ** Choose appropriate type for tag bitmap.
** from the SCRIPTS using 2 additionnal SCR_COPY's and a fiew
** bit handling on 64 bit integers. For these reasons, support for
** 32 up to 64 TAGS is compiled conditionnaly.
*/ */
#if SCSI_NCR_MAX_TAGS > 32
#if SCSI_NCR_MAX_TAGS <= 32 typedef u_int64 tagmap_t;
struct nlink {
ncrcmd l_cmd;
ncrcmd l_paddr;
};
#else #else
struct nlink { typedef u_int32 tagmap_t;
ncrcmd l_paddr;
};
typedef u64 u_int64;
#endif #endif
/* /*
** Number of targets supported by the driver. ** Number of targets supported by the driver.
** n permits target numbers 0..n-1. ** n permits target numbers 0..n-1.
...@@ -583,16 +574,12 @@ static spinlock_t driver_lock; ...@@ -583,16 +574,12 @@ static spinlock_t driver_lock;
#define iounmap vfree #define iounmap vfree
#endif #endif
#ifdef __sparc__ #if defined (__sparc__)
#include <asm/irq.h> #include <asm/irq.h>
#define remap_pci_mem(base, size) ((vm_offset_t) __va(base)) #elif defined (__alpha__)
#define unmap_pci_mem(vaddr, size) #define bus_dvma_to_mem(p) ((p) & 0xfffffffful)
#define pcivtophys(p) ((p) & pci_dvma_mask)
#else #else
#if defined(__alpha__) #define bus_dvma_to_mem(p) (p)
#define pcivtophys(p) ((p) & 0xfffffffful)
#else
#define pcivtophys(p) (p)
#endif #endif
#ifndef NCR_IOMAPPED #ifndef NCR_IOMAPPED
...@@ -615,7 +602,6 @@ static void unmap_pci_mem(vm_offset_t vaddr, u_long size) ...@@ -615,7 +602,6 @@ static void unmap_pci_mem(vm_offset_t vaddr, u_long size)
iounmap((void *) (vaddr & PAGE_MASK)); iounmap((void *) (vaddr & PAGE_MASK));
} }
#endif /* !NCR_IOMAPPED */ #endif /* !NCR_IOMAPPED */
#endif /* __sparc__ */
/* /*
** Insert a delay in micro-seconds and milli-seconds. ** Insert a delay in micro-seconds and milli-seconds.
...@@ -1488,8 +1474,8 @@ struct lcb { ...@@ -1488,8 +1474,8 @@ struct lcb {
** 64 possible tags. ** 64 possible tags.
**---------------------------------------------------------------- **----------------------------------------------------------------
*/ */
struct nlink jump_ccb_0; /* Default table if no tags */ u_int32 jump_ccb_0; /* Default table if no tags */
struct nlink *jump_ccb; /* Virtual address */ u_int32 *jump_ccb; /* Virtual address */
/*---------------------------------------------------------------- /*----------------------------------------------------------------
** CCB queue management. ** CCB queue management.
...@@ -1514,11 +1500,7 @@ struct lcb { ...@@ -1514,11 +1500,7 @@ struct lcb {
*/ */
u_char ia_tag; /* Allocation index */ u_char ia_tag; /* Allocation index */
u_char if_tag; /* Freeing index */ u_char if_tag; /* Freeing index */
#if SCSI_NCR_MAX_TAGS <= 32 u_char cb_tags[SCSI_NCR_MAX_TAGS]; /* Circular tags buffer */
u_char cb_tags[32]; /* Circular tags buffer */
#else
u_char cb_tags[64]; /* Circular tags buffer */
#endif
u_char usetags; /* Command queuing is active */ u_char usetags; /* Command queuing is active */
u_char maxtags; /* Max nr of tags asked by user */ u_char maxtags; /* Max nr of tags asked by user */
u_char numtags; /* Current number of tags */ u_char numtags; /* Current number of tags */
...@@ -1528,14 +1510,13 @@ struct lcb { ...@@ -1528,14 +1510,13 @@ struct lcb {
** QUEUE FULL control and ORDERED tag control. ** QUEUE FULL control and ORDERED tag control.
**---------------------------------------------------------------- **----------------------------------------------------------------
*/ */
/*----------------------------------------------------------------
** QUEUE FULL and ORDERED tag control.
**----------------------------------------------------------------
*/
u_short num_good; /* Nr of GOOD since QUEUE FULL */ u_short num_good; /* Nr of GOOD since QUEUE FULL */
#if SCSI_NCR_MAX_TAGS <= 32 tagmap_t tags_umap; /* Used tags bitmap */
u_int tags_umap; /* Used tags bitmap */ tagmap_t tags_smap; /* Tags in use at 'tag_stime' */
u_int tags_smap; /* Tags in use at 'tag_stime' */
#else
u_int64 tags_umap; /* Used tags bitmap */
u_int64 tags_smap; /* Tags in use at 'tag_stime' */
#endif
u_long tags_stime; /* Last time we set smap=umap */ u_long tags_stime; /* Last time we set smap=umap */
ccb_p held_ccb; /* CCB held for QUEUE FULL */ ccb_p held_ccb; /* CCB held for QUEUE FULL */
}; };
...@@ -2065,18 +2046,10 @@ struct script { ...@@ -2065,18 +2046,10 @@ struct script {
ncrcmd loadpos1 [ 4]; ncrcmd loadpos1 [ 4];
#endif #endif
ncrcmd resel_lun [ 6]; ncrcmd resel_lun [ 6];
#if SCSI_NCR_MAX_TAGS <= 32
ncrcmd resel_tag [ 8];
#else
ncrcmd resel_tag [ 6]; ncrcmd resel_tag [ 6];
ncrcmd jump_to_nexus [ 4]; ncrcmd jump_to_nexus [ 4];
ncrcmd nexus_indirect [ 4]; ncrcmd nexus_indirect [ 4];
#endif
#if SCSI_NCR_MAX_TAGS <= 32
ncrcmd resel_notag [ 4];
#else
ncrcmd resel_notag [ 4]; ncrcmd resel_notag [ 4];
#endif
ncrcmd data_in [MAX_SCATTERL * 4]; ncrcmd data_in [MAX_SCATTERL * 4];
ncrcmd data_in2 [ 4]; ncrcmd data_in2 [ 4];
ncrcmd data_out [MAX_SCATTERL * 4]; ncrcmd data_out [MAX_SCATTERL * 4];
...@@ -2987,18 +2960,12 @@ static struct script script0 __initdata = { ...@@ -2987,18 +2960,12 @@ static struct script script0 __initdata = {
/* /*
** Read the TAG from the SIDL. ** Read the TAG from the SIDL.
** Still an aggressive optimization. ;-) ** Still an aggressive optimization. ;-)
** Compute the CCB indirect jump address which
** is (#TAG*2 & 0xfc) due to tag numbering using
** 1,3,5..MAXTAGS*2+1 actual values.
*/ */
SCR_FROM_REG (sidl), SCR_REG_SFBR (sidl, SCR_SHL, 0),
0,
/*
** JUMP indirectly to the restart point of the CCB.
*/
#if SCSI_NCR_MAX_TAGS <= 32
SCR_SFBR_REG (temp, SCR_AND, 0xf8),
0, 0,
SCR_RETURN,
0,
#else
SCR_SFBR_REG (temp, SCR_AND, 0xfc), SCR_SFBR_REG (temp, SCR_AND, 0xfc),
0, 0,
}/*-------------------------< JUMP_TO_NEXUS >-------------------*/,{ }/*-------------------------< JUMP_TO_NEXUS >-------------------*/,{
...@@ -3011,7 +2978,6 @@ static struct script script0 __initdata = { ...@@ -3011,7 +2978,6 @@ static struct script script0 __initdata = {
RADDR (temp), RADDR (temp),
SCR_RETURN, SCR_RETURN,
0, 0,
#endif
}/*-------------------------< RESEL_NOTAG >-------------------*/,{ }/*-------------------------< RESEL_NOTAG >-------------------*/,{
/* /*
** No tag expected. ** No tag expected.
...@@ -3019,13 +2985,8 @@ static struct script script0 __initdata = { ...@@ -3019,13 +2985,8 @@ static struct script script0 __initdata = {
*/ */
SCR_MOVE_ABS (1) ^ SCR_MSG_IN, SCR_MOVE_ABS (1) ^ SCR_MSG_IN,
NADDR (msgin), NADDR (msgin),
#if SCSI_NCR_MAX_TAGS <= 32
SCR_RETURN,
0,
#else
SCR_JUMP, SCR_JUMP,
PADDR (jump_to_nexus), PADDR (jump_to_nexus),
#endif
}/*-------------------------< DATA_IN >--------------------*/,{ }/*-------------------------< DATA_IN >--------------------*/,{
/* /*
** Because the size depends on the ** Because the size depends on the
...@@ -3907,7 +3868,7 @@ static void ncr_script_copy_and_bind (ncb_p np, ncrcmd *src, ncrcmd *dst, int le ...@@ -3907,7 +3868,7 @@ static void ncr_script_copy_and_bind (ncb_p np, ncrcmd *src, ncrcmd *dst, int le
switch (old & RELOC_MASK) { switch (old & RELOC_MASK) {
case RELOC_REGISTER: case RELOC_REGISTER:
new = (old & ~RELOC_MASK) new = (old & ~RELOC_MASK)
+ pcivtophys(np->paddr); + bus_dvma_to_mem(np->paddr);
break; break;
case RELOC_LABEL: case RELOC_LABEL:
new = (old & ~RELOC_MASK) + np->p_script; new = (old & ~RELOC_MASK) + np->p_script;
...@@ -4654,7 +4615,7 @@ printk(KERN_INFO "ncr53c%s-%d: rev=0x%02x, base=0x%lx, io_port=0x%lx, irq=%d\n", ...@@ -4654,7 +4615,7 @@ printk(KERN_INFO "ncr53c%s-%d: rev=0x%02x, base=0x%lx, io_port=0x%lx, irq=%d\n",
np->scripth = np->scripth0; np->scripth = np->scripth0;
np->p_scripth = vtophys(np->scripth); np->p_scripth = vtophys(np->scripth);
np->p_script = (np->paddr2) ? pcivtophys(np->paddr2) : vtophys(np->script0); np->p_script = (np->paddr2) ? bus_dvma_to_mem(np->paddr2) : vtophys(np->script0);
ncr_script_copy_and_bind (np, (ncrcmd *) &script0, (ncrcmd *) np->script0, sizeof(struct script)); ncr_script_copy_and_bind (np, (ncrcmd *) &script0, (ncrcmd *) np->script0, sizeof(struct script));
ncr_script_copy_and_bind (np, (ncrcmd *) &scripth0, (ncrcmd *) np->scripth0, sizeof(struct scripth)); ncr_script_copy_and_bind (np, (ncrcmd *) &scripth0, (ncrcmd *) np->scripth0, sizeof(struct scripth));
...@@ -5063,12 +5024,12 @@ int ncr_queue_command (ncb_p np, Scsi_Cmnd *cmd) ...@@ -5063,12 +5024,12 @@ int ncr_queue_command (ncb_p np, Scsi_Cmnd *cmd)
} }
} }
msgptr[msglen++] = order; msgptr[msglen++] = order;
#if SCSI_NCR_MAX_TAGS <= 32 /*
msgptr[msglen++] = (cp->tag << 3) + 1; ** Actual tags are numbered 1,3,5,..2*MAXTAGS+1,
#else ** since we may have to deal with devices that have
msgptr[msglen++] = (cp->tag << 2) + 1; ** problems with #TAG 0 or too great #TAG numbers.
#endif */
msgptr[msglen++] = (cp->tag << 1) + 1;
} }
switch (nego) { switch (nego) {
...@@ -5316,7 +5277,7 @@ static void ncr_start_next_ccb(ncb_p np, lcb_p lp, int maxn) ...@@ -5316,7 +5277,7 @@ static void ncr_start_next_ccb(ncb_p np, lcb_p lp, int maxn)
++lp->queuedccbs; ++lp->queuedccbs;
cp = xpt_que_entry(qp, struct ccb, link_ccbq); cp = xpt_que_entry(qp, struct ccb, link_ccbq);
xpt_insque_tail(qp, &lp->busy_ccbq); xpt_insque_tail(qp, &lp->busy_ccbq);
lp->jump_ccb[cp->tag == NO_TAG ? 0 : cp->tag].l_paddr = lp->jump_ccb[cp->tag == NO_TAG ? 0 : cp->tag] =
cpu_to_scr(CCB_PHYS (cp, restart)); cpu_to_scr(CCB_PHYS (cp, restart));
ncr_put_start_queue(np, cp); ncr_put_start_queue(np, cp);
} }
...@@ -5705,7 +5666,7 @@ static int ncr_detach(ncb_p np) ...@@ -5705,7 +5666,7 @@ static int ncr_detach(ncb_p np)
#ifdef DEBUG_NCR53C8XX #ifdef DEBUG_NCR53C8XX
printk("%s: freeing lp (%lx)\n", ncr_name(np), (u_long) lp); printk("%s: freeing lp (%lx)\n", ncr_name(np), (u_long) lp);
#endif #endif
if (lp->maxnxs > 1) if (lp->jump_ccb != &lp->jump_ccb_0)
m_free(lp->jump_ccb, 256); m_free(lp->jump_ccb, 256);
m_free(lp, sizeof(*lp)); m_free(lp, sizeof(*lp));
} }
...@@ -5861,9 +5822,10 @@ void ncr_complete (ncb_p np, ccb_p cp) ...@@ -5861,9 +5822,10 @@ void ncr_complete (ncb_p np, ccb_p cp)
/* /*
** On standard INQUIRY response (EVPD and CmDt ** On standard INQUIRY response (EVPD and CmDt
** not set), setup logical unit according to ** not set), setup logical unit according to
** announced capabilities. ** announced capabilities (we need the 1rst 7 bytes).
*/ */
if (cmd->cmnd[0] == 0x12 && !(cmd->cmnd[1] & 0x3)) { if (cmd->cmnd[0] == 0x12 && !(cmd->cmnd[1] & 0x3) &&
cmd->cmnd[4] >= 7) {
ncr_setup_lcb (np, cmd->target, cmd->lun, ncr_setup_lcb (np, cmd->target, cmd->lun,
(char *) cmd->request_buffer); (char *) cmd->request_buffer);
} }
...@@ -6218,6 +6180,14 @@ void ncr_init (ncb_p np, int reset, char * msg, u_long code) ...@@ -6218,6 +6180,14 @@ void ncr_init (ncb_p np, int reset, char * msg, u_long code)
np->scsi_mode = INB (nc_stest4) & SMODE; np->scsi_mode = INB (nc_stest4) & SMODE;
} }
/*
** DEL 441 - 53C876 Rev 5 - Part Number 609-0392787/2788 - ITEM 2.
** Disable overlapped arbitration.
*/
if (np->device_id == PCI_DEVICE_ID_NCR_53C875 &&
np->revision_id >= 0x10 && np->revision_id <= 0x15)
OUTB (nc_ctest0, (1<<5));
/* /*
** Fill in target structure. ** Fill in target structure.
** Reinitialize usrsync. ** Reinitialize usrsync.
...@@ -7778,7 +7748,7 @@ void ncr_int_sir (ncb_p np) ...@@ -7778,7 +7748,7 @@ void ncr_int_sir (ncb_p np)
** We just assume lun=0, 1 CCB, no tag. ** We just assume lun=0, 1 CCB, no tag.
*/ */
if (tp->lp[0]) { if (tp->lp[0]) {
OUTL (nc_dsp, scr_to_cpu(tp->lp[0]->jump_ccb[0].l_paddr)); OUTL (nc_dsp, scr_to_cpu(tp->lp[0]->jump_ccb[0]));
return; return;
} }
case SIR_RESEL_BAD_TARGET: /* Will send a TARGET RESET message */ case SIR_RESEL_BAD_TARGET: /* Will send a TARGET RESET message */
...@@ -8307,17 +8277,9 @@ static ccb_p ncr_get_ccb (ncb_p np, u_char tn, u_char ln) ...@@ -8307,17 +8277,9 @@ static ccb_p ncr_get_ccb (ncb_p np, u_char tn, u_char ln)
if (lp) { if (lp) {
if (tag != NO_TAG) { if (tag != NO_TAG) {
++lp->ia_tag; ++lp->ia_tag;
#if SCSI_NCR_MAX_TAGS <= 32 if (lp->ia_tag == SCSI_NCR_MAX_TAGS)
if (lp->ia_tag == 32)
#else
if (lp->ia_tag == 64)
#endif
lp->ia_tag = 0; lp->ia_tag = 0;
#if SCSI_NCR_MAX_TAGS <= 32 lp->tags_umap |= (((tagmap_t) 1) << tag);
lp->tags_umap |= (1u << tag);
#else
lp->tags_umap |= (((u_int64) 1) << tag);
#endif
} }
} }
...@@ -8363,22 +8325,14 @@ static void ncr_free_ccb (ncb_p np, ccb_p cp) ...@@ -8363,22 +8325,14 @@ static void ncr_free_ccb (ncb_p np, ccb_p cp)
if (lp) { if (lp) {
if (cp->tag != NO_TAG) { if (cp->tag != NO_TAG) {
lp->cb_tags[lp->if_tag++] = cp->tag; lp->cb_tags[lp->if_tag++] = cp->tag;
#if SCSI_NCR_MAX_TAGS <= 32 if (lp->if_tag == SCSI_NCR_MAX_TAGS)
if (lp->if_tag == 32)
#else
if (lp->if_tag == 64)
#endif
lp->if_tag = 0; lp->if_tag = 0;
#if SCSI_NCR_MAX_TAGS <= 32 lp->tags_umap &= ~(((tagmap_t) 1) << cp->tag);
lp->tags_umap &= ~(1u << cp->tag);
#else
lp->tags_umap &= ~(((u_int64) 1) << cp->tag);
#endif
lp->tags_smap &= lp->tags_umap; lp->tags_smap &= lp->tags_umap;
lp->jump_ccb[cp->tag].l_paddr = lp->jump_ccb[cp->tag] =
cpu_to_scr(NCB_SCRIPTH_PHYS(np, bad_i_t_l_q)); cpu_to_scr(NCB_SCRIPTH_PHYS(np, bad_i_t_l_q));
} else { } else {
lp->jump_ccb[0].l_paddr = lp->jump_ccb[0] =
cpu_to_scr(NCB_SCRIPTH_PHYS(np, bad_i_t_l)); cpu_to_scr(NCB_SCRIPTH_PHYS(np, bad_i_t_l));
} }
} }
...@@ -8412,7 +8366,7 @@ static void ncr_free_ccb (ncb_p np, ccb_p cp) ...@@ -8412,7 +8366,7 @@ static void ncr_free_ccb (ncb_p np, ccb_p cp)
#define ncr_reg_bus_addr(r) \ #define ncr_reg_bus_addr(r) \
(pcivtophys(np->paddr) + offsetof (struct ncr_reg, r)) (bus_dvma_to_mem(np->paddr) + offsetof (struct ncr_reg, r))
/*------------------------------------------------------------------------ /*------------------------------------------------------------------------
** Initialize the fixed part of a CCB structure. ** Initialize the fixed part of a CCB structure.
...@@ -8578,28 +8532,6 @@ static void ncr_init_tcb (ncb_p np, u_char tn) ...@@ -8578,28 +8532,6 @@ static void ncr_init_tcb (ncb_p np, u_char tn)
} }
/*------------------------------------------------------------------------
** Reselection JUMP table initialisation.
**------------------------------------------------------------------------
** The SCRIPTS processor jumps on reselection to the entry
** corresponding to the CCB using the tag as offset.
**------------------------------------------------------------------------
*/
static void ncr_setup_jump_ccb(ncb_p np, lcb_p lp)
{
int i;
lp->p_jump_ccb = cpu_to_scr(vtophys(lp->jump_ccb));
for (i = 0 ; i < lp->maxnxs ; i++) {
#if SCSI_NCR_MAX_TAGS <= 32
lp->jump_ccb[i].l_cmd = cpu_to_scr(SCR_JUMP);
#endif
lp->jump_ccb[i].l_paddr =
cpu_to_scr(NCB_SCRIPTH_PHYS (np, bad_i_t_l_q));
lp->cb_tags[i] = i;
}
}
/*------------------------------------------------------------------------ /*------------------------------------------------------------------------
** Lun control block allocation and initialization. ** Lun control block allocation and initialization.
**------------------------------------------------------------------------ **------------------------------------------------------------------------
...@@ -8649,12 +8581,12 @@ static lcb_p ncr_alloc_lcb (ncb_p np, u_char tn, u_char ln) ...@@ -8649,12 +8581,12 @@ static lcb_p ncr_alloc_lcb (ncb_p np, u_char tn, u_char ln)
xpt_que_init(&lp->skip_ccbq); xpt_que_init(&lp->skip_ccbq);
/* /*
** Set max CCBs to 1 and use the default jump table ** Set max CCBs to 1 and use the default 1 entry
** by default. ** jump table by default.
*/ */
lp->maxnxs = 1; lp->maxnxs = 1;
lp->jump_ccb = &lp->jump_ccb_0; lp->jump_ccb = &lp->jump_ccb_0;
ncr_setup_jump_ccb(np, lp); lp->p_jump_ccb = cpu_to_scr(vtophys(lp->jump_ccb));
/* /*
** Initilialyze the reselect script: ** Initilialyze the reselect script:
...@@ -8732,6 +8664,13 @@ static lcb_p ncr_setup_lcb (ncb_p np, u_char tn, u_char ln, u_char *inq_data) ...@@ -8732,6 +8664,13 @@ static lcb_p ncr_setup_lcb (ncb_p np, u_char tn, u_char ln, u_char *inq_data)
if ((inq_data[2] & 0x7) >= 2 && (inq_data[3] & 0xf) == 2) if ((inq_data[2] & 0x7) >= 2 && (inq_data[3] & 0xf) == 2)
inq_byte7 = inq_data[7]; inq_byte7 = inq_data[7];
/*
** Throw away announced LUN capabilities if we are told
** that there is no real device supported by the logical unit.
*/
if ((inq_data[0] & 0xe0) > 0x20 || (inq_data[0] & 0x1f) == 0x1f)
inq_byte7 &= (INQ7_SYNC | INQ7_WIDE16);
/* /*
** If user is wanting SYNC, force this feature. ** If user is wanting SYNC, force this feature.
*/ */
...@@ -8751,18 +8690,20 @@ static lcb_p ncr_setup_lcb (ncb_p np, u_char tn, u_char ln, u_char *inq_data) ...@@ -8751,18 +8690,20 @@ static lcb_p ncr_setup_lcb (ncb_p np, u_char tn, u_char ln, u_char *inq_data)
** If unit supports tagged commands, allocate the ** If unit supports tagged commands, allocate the
** CCB JUMP table if not yet. ** CCB JUMP table if not yet.
*/ */
if ((inq_byte7 & INQ7_QUEUE) && lp->maxnxs < 2) { if ((inq_byte7 & INQ7_QUEUE) && lp->jump_ccb == &lp->jump_ccb_0) {
struct nlink *jumps; int i;
jumps = m_alloc(256, 8); lp->jump_ccb = m_alloc(256, 8);
if (!jumps) if (!lp->jump_ccb) {
lp->jump_ccb = &lp->jump_ccb_0;
goto fail; goto fail;
#if SCSI_NCR_MAX_TAGS <= 32 }
lp->maxnxs = 32; lp->p_jump_ccb = cpu_to_scr(vtophys(lp->jump_ccb));
#else for (i = 0 ; i < 64 ; i++)
lp->maxnxs = 64; lp->jump_ccb[i] =
#endif cpu_to_scr(NCB_SCRIPTH_PHYS (np, bad_i_t_l_q));
lp->jump_ccb = jumps; for (i = 0 ; i < SCSI_NCR_MAX_TAGS ; i++)
ncr_setup_jump_ccb(np, lp); lp->cb_tags[i] = i;
lp->maxnxs = SCSI_NCR_MAX_TAGS;
lp->tags_stime = jiffies; lp->tags_stime = jiffies;
} }
......
...@@ -45,7 +45,7 @@ ...@@ -45,7 +45,7 @@
/* /*
** Name and revision of the driver ** Name and revision of the driver
*/ */
#define SCSI_NCR_DRIVER_NAME "ncr53c8xx - revision 3.0i" #define SCSI_NCR_DRIVER_NAME "ncr53c8xx - revision 3.1a"
/* /*
** Check supported Linux versions ** Check supported Linux versions
...@@ -468,7 +468,10 @@ typedef struct { ...@@ -468,7 +468,10 @@ typedef struct {
{PCI_DEVICE_ID_NCR_53C875, 0x01, "875", 6, 16, 5, \ {PCI_DEVICE_ID_NCR_53C875, 0x01, "875", 6, 16, 5, \
FE_WIDE|FE_ULTRA|FE_CLK80|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|FE_RAM}\ FE_WIDE|FE_ULTRA|FE_CLK80|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|FE_RAM}\
, \ , \
{PCI_DEVICE_ID_NCR_53C875, 0xff, "875", 6, 16, 5, \ {PCI_DEVICE_ID_NCR_53C875, 0x0f, "875", 6, 16, 5, \
FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|FE_RAM}\
, \
{PCI_DEVICE_ID_NCR_53C875, 0xff, "876", 6, 16, 5, \
FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|FE_RAM}\ FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|FE_RAM}\
, \ , \
{PCI_DEVICE_ID_NCR_53C875J,0xff, "875J", 6, 16, 5, \ {PCI_DEVICE_ID_NCR_53C875J,0xff, "875J", 6, 16, 5, \
......
...@@ -65,7 +65,8 @@ ...@@ -65,7 +65,8 @@
#define SD_MINOR_NUMBER(i) ((i) & 255) #define SD_MINOR_NUMBER(i) ((i) & 255)
#define MKDEV_SD_PARTITION(i) MKDEV(SD_MAJOR_NUMBER(i), (i) & 255) #define MKDEV_SD_PARTITION(i) MKDEV(SD_MAJOR_NUMBER(i), (i) & 255)
#define MKDEV_SD(index) MKDEV_SD_PARTITION((index) << 4) #define MKDEV_SD(index) MKDEV_SD_PARTITION((index) << 4)
#define N_USED_SD_MAJORS ((sd_template.dev_max + SCSI_DISKS_PER_MAJOR - 1) / SCSI_DISKS_PER_MAJOR) #define N_USED_SCSI_DISKS (sd_template.dev_max + SCSI_DISKS_PER_MAJOR - 1)
#define N_USED_SD_MAJORS (N_USED_SCSI_DISKS / SCSI_DISKS_PER_MAJOR)
#define MAX_RETRIES 5 #define MAX_RETRIES 5
...@@ -1783,13 +1784,13 @@ void cleanup_module( void) ...@@ -1783,13 +1784,13 @@ void cleanup_module( void)
for (sdgd = gendisk_head; sdgd; sdgd = sdgd->next) for (sdgd = gendisk_head; sdgd; sdgd = sdgd->next)
{ {
if (sdgd->next >= sd_gendisks && sdgd->next <= LAST_SD_GENDISK) if (sdgd->next >= sd_gendisks && sdgd->next <= LAST_SD_GENDISK.max_nr)
removed++, sdgd->next = sdgd->next->next; removed++, sdgd->next = sdgd->next->next;
else sdgd = sdgd->next; else sdgd = sdgd->next;
} }
if (removed != N_USED_SCSI_DISKS) if (removed != N_USED_SD_MAJORS)
printk("%s %d sd_gendisks in disk chain", printk("%s %d sd_gendisks in disk chain",
removed > N_USED_SCSI_DISKS ? "total" : "just", removed); removed > N_USED_SD_MAJORS ? "total" : "just", removed);
} }
......
...@@ -101,6 +101,7 @@ ...@@ -101,6 +101,7 @@
/*****************************************************************************/ /*****************************************************************************/
#include <linux/config.h>
#include <linux/version.h> #include <linux/version.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/string.h> #include <linux/string.h>
......
...@@ -626,7 +626,7 @@ struct dentry * open_namei(const char * pathname, int flag, int mode) ...@@ -626,7 +626,7 @@ struct dentry * open_namei(const char * pathname, int flag, int mode)
if (!inode) if (!inode)
goto exit; goto exit;
error = -EACCES; error = -ELOOP;
if (S_ISLNK(inode->i_mode)) if (S_ISLNK(inode->i_mode))
goto exit; goto exit;
......
...@@ -14,6 +14,7 @@ ...@@ -14,6 +14,7 @@
* Copyright (C) 1995, 1996, 1997 Olaf Kirch <okir@monad.swb.de> * Copyright (C) 1995, 1996, 1997 Olaf Kirch <okir@monad.swb.de>
*/ */
#include <linux/config.h>
#include <linux/version.h> #include <linux/version.h>
#include <linux/sched.h> #include <linux/sched.h>
#include <linux/errno.h> #include <linux/errno.h>
......
...@@ -49,7 +49,6 @@ utf8_mbtowc(__u16 *p, const __u8 *s, int n) ...@@ -49,7 +49,6 @@ utf8_mbtowc(__u16 *p, const __u8 *s, int n)
int c0, c, nc; int c0, c, nc;
struct utf8_table *t; struct utf8_table *t;
printk("utf8_mbtowc\n");
nc = 0; nc = 0;
c0 = *s; c0 = *s;
l = c0; l = c0;
...@@ -80,11 +79,9 @@ utf8_mbstowcs(__u16 *pwcs, const __u8 *s, int n) ...@@ -80,11 +79,9 @@ utf8_mbstowcs(__u16 *pwcs, const __u8 *s, int n)
const __u8 *ip; const __u8 *ip;
int size; int size;
printk("\nutf8_mbstowcs: n=%d\n", n);
op = pwcs; op = pwcs;
ip = s; ip = s;
while (*ip && n > 0) { while (*ip && n > 0) {
printk(" %02x", *ip);
if (*ip & 0x80) { if (*ip & 0x80) {
size = utf8_mbtowc(op, ip, n); size = utf8_mbtowc(op, ip, n);
if (size == -1) { if (size == -1) {
......
...@@ -130,22 +130,20 @@ int do_select(int n, fd_set_buffer *fds, unsigned long timeout) ...@@ -130,22 +130,20 @@ int do_select(int n, fd_set_buffer *fds, unsigned long timeout)
int retval; int retval;
int i; int i;
lock_kernel();
wait = NULL; wait = NULL;
current->timeout = timeout; current->timeout = timeout;
if (timeout) { if (timeout) {
struct poll_table_entry *entry = (struct poll_table_entry *) struct poll_table_entry *entry = (struct poll_table_entry *) __get_free_page(GFP_KERNEL);
__get_free_page(GFP_KERNEL); if (!entry)
if (!entry) { return -ENOMEM;
retval = -ENOMEM;
goto out_nowait;
}
wait_table.nr = 0; wait_table.nr = 0;
wait_table.entry = entry; wait_table.entry = entry;
wait = &wait_table; wait = &wait_table;
} }
lock_kernel();
retval = max_select_fd(n, fds); retval = max_select_fd(n, fds);
if (retval < 0) if (retval < 0)
goto out; goto out;
......
...@@ -11,7 +11,6 @@ ...@@ -11,7 +11,6 @@
#ifndef __ASM_MIPS_FLOPPY_H #ifndef __ASM_MIPS_FLOPPY_H
#define __ASM_MIPS_FLOPPY_H #define __ASM_MIPS_FLOPPY_H
#include <linux/config.h>
#include <asm/bootinfo.h> #include <asm/bootinfo.h>
#include <asm/jazz.h> #include <asm/jazz.h>
#include <asm/jazzdma.h> #include <asm/jazzdma.h>
......
...@@ -272,7 +272,7 @@ extern int remap_page_range(unsigned long from, unsigned long to, unsigned long ...@@ -272,7 +272,7 @@ extern int remap_page_range(unsigned long from, unsigned long to, unsigned long
extern int zeromap_page_range(unsigned long from, unsigned long size, pgprot_t prot); extern int zeromap_page_range(unsigned long from, unsigned long size, pgprot_t prot);
extern void vmtruncate(struct inode * inode, unsigned long offset); extern void vmtruncate(struct inode * inode, unsigned long offset);
extern void handle_mm_fault(struct task_struct *tsk,struct vm_area_struct *vma, unsigned long address, int write_access); extern int handle_mm_fault(struct task_struct *tsk,struct vm_area_struct *vma, unsigned long address, int write_access);
extern void make_pages_present(unsigned long addr, unsigned long end); extern void make_pages_present(unsigned long addr, unsigned long end);
extern int pgt_cache_water[2]; extern int pgt_cache_water[2];
...@@ -329,17 +329,10 @@ extern void put_cached_page(unsigned long); ...@@ -329,17 +329,10 @@ extern void put_cached_page(unsigned long);
*/ */
extern int free_memory_available(void); extern int free_memory_available(void);
extern struct task_struct * kswapd_task; extern struct task_struct * kswapd_task;
#define wakeup_kswapd() do { \
extern inline void kswapd_notify(unsigned int gfp_mask) if (kswapd_task->state & TASK_INTERRUPTIBLE) \
{ wake_up_process(kswapd_task); \
if (kswapd_task) { } while (0)
wake_up_process(kswapd_task);
if (gfp_mask & __GFP_WAIT) {
current->policy |= SCHED_YIELD;
schedule();
}
}
}
/* vma is the first one with address < vma->vm_end, /* vma is the first one with address < vma->vm_end,
* and even address < vma->vm_start. Have to extend vma. */ * and even address < vma->vm_start. Have to extend vma. */
......
...@@ -83,7 +83,6 @@ static inline void remove_page_from_hash_queue(struct page * page) ...@@ -83,7 +83,6 @@ static inline void remove_page_from_hash_queue(struct page * page)
static inline void __add_page_to_hash_queue(struct page * page, struct page **p) static inline void __add_page_to_hash_queue(struct page * page, struct page **p)
{ {
page_cache_size++; page_cache_size++;
set_bit(PG_referenced, &page->flags);
page->age = PAGE_AGE_VALUE; page->age = PAGE_AGE_VALUE;
if((page->next_hash = *p) != NULL) if((page->next_hash = *p) != NULL)
(*p)->pprev_hash = &page->next_hash; (*p)->pprev_hash = &page->next_hash;
......
...@@ -208,7 +208,7 @@ struct parport { ...@@ -208,7 +208,7 @@ struct parport {
int number; /* port index - the `n' in `parportn' */ int number; /* port index - the `n' in `parportn' */
spinlock_t pardevice_lock; spinlock_t pardevice_lock;
spinlock_t waitlist_lock; spinlock_t waitlist_lock;
spinlock_t cad_lock; rwlock_t cad_lock;
}; };
/* parport_register_port registers a new parallel port at the given address (if /* parport_register_port registers a new parallel port at the given address (if
......
...@@ -293,7 +293,7 @@ static inline void add_to_page_cache(struct page * page, ...@@ -293,7 +293,7 @@ static inline void add_to_page_cache(struct page * page,
struct page **hash) struct page **hash)
{ {
atomic_inc(&page->count); atomic_inc(&page->count);
page->flags &= ~((1 << PG_uptodate) | (1 << PG_error)); page->flags = (page->flags & ~((1 << PG_uptodate) | (1 << PG_error))) | (1 << PG_referenced);
page->offset = offset; page->offset = offset;
add_page_to_inode_queue(inode, page); add_page_to_inode_queue(inode, page);
__add_page_to_hash_queue(page, hash); __add_page_to_hash_queue(page, hash);
...@@ -328,7 +328,6 @@ static unsigned long try_to_read_ahead(struct file * file, ...@@ -328,7 +328,6 @@ static unsigned long try_to_read_ahead(struct file * file,
*/ */
page = mem_map + MAP_NR(page_cache); page = mem_map + MAP_NR(page_cache);
add_to_page_cache(page, inode, offset, hash); add_to_page_cache(page, inode, offset, hash);
set_bit(PG_referenced, &page->flags);
inode->i_op->readpage(file, page); inode->i_op->readpage(file, page);
page_cache = 0; page_cache = 0;
} }
......
...@@ -629,7 +629,7 @@ unsigned long put_dirty_page(struct task_struct * tsk, unsigned long page, unsig ...@@ -629,7 +629,7 @@ unsigned long put_dirty_page(struct task_struct * tsk, unsigned long page, unsig
* change only once the write actually happens. This avoids a few races, * change only once the write actually happens. This avoids a few races,
* and potentially makes it more efficient. * and potentially makes it more efficient.
*/ */
static void do_wp_page(struct task_struct * tsk, struct vm_area_struct * vma, static int do_wp_page(struct task_struct * tsk, struct vm_area_struct * vma,
unsigned long address, pte_t *page_table) unsigned long address, pte_t *page_table)
{ {
pte_t pte; pte_t pte;
...@@ -665,30 +665,31 @@ static void do_wp_page(struct task_struct * tsk, struct vm_area_struct * vma, ...@@ -665,30 +665,31 @@ static void do_wp_page(struct task_struct * tsk, struct vm_area_struct * vma,
set_pte(page_table, pte_mkwrite(pte_mkdirty(mk_pte(new_page, vma->vm_page_prot)))); set_pte(page_table, pte_mkwrite(pte_mkdirty(mk_pte(new_page, vma->vm_page_prot))));
free_page(old_page); free_page(old_page);
flush_tlb_page(vma, address); flush_tlb_page(vma, address);
return; return 1;
} }
flush_cache_page(vma, address); flush_cache_page(vma, address);
set_pte(page_table, BAD_PAGE); set_pte(page_table, BAD_PAGE);
flush_tlb_page(vma, address); flush_tlb_page(vma, address);
free_page(old_page); free_page(old_page);
oom(tsk); oom(tsk);
return; return 0;
} }
if (PageSwapCache(page_map)) if (PageSwapCache(page_map))
delete_from_swap_cache(page_map); delete_from_swap_cache(page_map);
flush_cache_page(vma, address); flush_cache_page(vma, address);
set_pte(page_table, pte_mkdirty(pte_mkwrite(pte))); set_pte(page_table, pte_mkdirty(pte_mkwrite(pte)));
flush_tlb_page(vma, address); flush_tlb_page(vma, address);
end_wp_page:
if (new_page) if (new_page)
free_page(new_page); free_page(new_page);
return; return 1;
bad_wp_page: bad_wp_page:
printk("do_wp_page: bogus page at address %08lx (%08lx)\n",address,old_page); printk("do_wp_page: bogus page at address %08lx (%08lx)\n",address,old_page);
send_sig(SIGKILL, tsk, 1); send_sig(SIGKILL, tsk, 1);
end_wp_page:
if (new_page) if (new_page)
free_page(new_page); free_page(new_page);
return; return 0;
} }
/* /*
...@@ -777,22 +778,19 @@ void vmtruncate(struct inode * inode, unsigned long offset) ...@@ -777,22 +778,19 @@ void vmtruncate(struct inode * inode, unsigned long offset)
} }
static inline void do_swap_page(struct task_struct * tsk, static int do_swap_page(struct task_struct * tsk,
struct vm_area_struct * vma, unsigned long address, struct vm_area_struct * vma, unsigned long address,
pte_t * page_table, pte_t entry, int write_access) pte_t * page_table, pte_t entry, int write_access)
{ {
pte_t page; lock_kernel();
if (!vma->vm_ops || !vma->vm_ops->swapin) { if (!vma->vm_ops || !vma->vm_ops->swapin) {
swap_in(tsk, vma, page_table, pte_val(entry), write_access); swap_in(tsk, vma, page_table, pte_val(entry), write_access);
flush_page_to_ram(pte_page(*page_table)); flush_page_to_ram(pte_page(*page_table));
return; } else {
} pte_t page = vma->vm_ops->swapin(vma, address - vma->vm_start + vma->vm_offset, pte_val(entry));
page = vma->vm_ops->swapin(vma, address - vma->vm_start + vma->vm_offset, pte_val(entry));
if (pte_val(*page_table) != pte_val(entry)) { if (pte_val(*page_table) != pte_val(entry)) {
free_page(pte_page(page)); free_page(pte_page(page));
return; } else {
}
if (atomic_read(&mem_map[MAP_NR(pte_page(page))].count) > 1 && if (atomic_read(&mem_map[MAP_NR(pte_page(page))].count) > 1 &&
!(vma->vm_flags & VM_SHARED)) !(vma->vm_flags & VM_SHARED))
page = pte_wrprotect(page); page = pte_wrprotect(page);
...@@ -800,7 +798,30 @@ static inline void do_swap_page(struct task_struct * tsk, ...@@ -800,7 +798,30 @@ static inline void do_swap_page(struct task_struct * tsk,
++tsk->maj_flt; ++tsk->maj_flt;
flush_page_to_ram(pte_page(page)); flush_page_to_ram(pte_page(page));
set_pte(page_table, page); set_pte(page_table, page);
return; }
}
unlock_kernel();
return 1;
}
/*
* This only needs the MM semaphore
*/
static int do_anonymous_page(struct task_struct * tsk, struct vm_area_struct * vma, pte_t *page_table, int write_access)
{
pte_t entry = pte_wrprotect(mk_pte(ZERO_PAGE, vma->vm_page_prot));
if (write_access) {
unsigned long page = __get_free_page(GFP_KERNEL);
if (!page)
return 0;
clear_page(page);
entry = pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
vma->vm_mm->rss++;
tsk->min_flt++;
flush_page_to_ram(page);
}
put_page(page_table, entry);
return 1;
} }
/* /*
...@@ -811,26 +832,33 @@ static inline void do_swap_page(struct task_struct * tsk, ...@@ -811,26 +832,33 @@ static inline void do_swap_page(struct task_struct * tsk,
* *
* As this is called only for pages that do not currently exist, we * As this is called only for pages that do not currently exist, we
* do not need to flush old virtual caches or the TLB. * do not need to flush old virtual caches or the TLB.
*
* This is called with the MM semaphore held, but without the kernel
* lock.
*/ */
static void do_no_page(struct task_struct * tsk, struct vm_area_struct * vma, static int do_no_page(struct task_struct * tsk, struct vm_area_struct * vma,
unsigned long address, int write_access, pte_t *page_table, pte_t entry) unsigned long address, int write_access, pte_t *page_table)
{ {
unsigned long page; unsigned long page;
pte_t entry;
if (!pte_none(entry))
goto swap_page;
address &= PAGE_MASK;
if (!vma->vm_ops || !vma->vm_ops->nopage) if (!vma->vm_ops || !vma->vm_ops->nopage)
goto anonymous_page; return do_anonymous_page(tsk, vma, page_table, write_access);
/* /*
* The third argument is "no_share", which tells the low-level code * The third argument is "no_share", which tells the low-level code
* to copy, not share the page even if sharing is possible. It's * to copy, not share the page even if sharing is possible. It's
* essentially an early COW detection * essentially an early COW detection.
*
* We need to grab the kernel lock for this..
*/ */
page = vma->vm_ops->nopage(vma, address, lock_kernel();
page = vma->vm_ops->nopage(vma, address & PAGE_MASK,
(vma->vm_flags & VM_SHARED)?0:write_access); (vma->vm_flags & VM_SHARED)?0:write_access);
unlock_kernel();
if (!page) if (!page)
goto sigbus; return 0;
++tsk->maj_flt; ++tsk->maj_flt;
++vma->vm_mm->rss; ++vma->vm_mm->rss;
/* /*
...@@ -852,32 +880,7 @@ static void do_no_page(struct task_struct * tsk, struct vm_area_struct * vma, ...@@ -852,32 +880,7 @@ static void do_no_page(struct task_struct * tsk, struct vm_area_struct * vma,
entry = pte_wrprotect(entry); entry = pte_wrprotect(entry);
put_page(page_table, entry); put_page(page_table, entry);
/* no need to invalidate: a not-present page shouldn't be cached */ /* no need to invalidate: a not-present page shouldn't be cached */
return; return 1;
anonymous_page:
entry = pte_wrprotect(mk_pte(ZERO_PAGE, vma->vm_page_prot));
if (write_access) {
unsigned long page = __get_free_page(GFP_KERNEL);
if (!page)
goto sigbus;
clear_page(page);
entry = pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
vma->vm_mm->rss++;
tsk->min_flt++;
flush_page_to_ram(page);
}
put_page(page_table, entry);
return;
sigbus:
force_sig(SIGBUS, current);
put_page(page_table, BAD_PAGE);
/* no need to invalidate, wasn't present */
return;
swap_page:
do_swap_page(tsk, vma, address, page_table, entry, write_access);
return;
} }
/* /*
...@@ -889,54 +892,54 @@ static void do_no_page(struct task_struct * tsk, struct vm_area_struct * vma, ...@@ -889,54 +892,54 @@ static void do_no_page(struct task_struct * tsk, struct vm_area_struct * vma,
* with external mmu caches can use to update those (ie the Sparc or * with external mmu caches can use to update those (ie the Sparc or
* PowerPC hashed page tables that act as extended TLBs). * PowerPC hashed page tables that act as extended TLBs).
*/ */
static inline void handle_pte_fault(struct task_struct *tsk, static inline int handle_pte_fault(struct task_struct *tsk,
struct vm_area_struct * vma, unsigned long address, struct vm_area_struct * vma, unsigned long address,
int write_access, pte_t * pte) int write_access, pte_t * pte)
{ {
pte_t entry = *pte; pte_t entry = *pte;
if (!pte_present(entry)) { if (!pte_present(entry)) {
do_no_page(tsk, vma, address, write_access, pte, entry); if (pte_none(entry))
return; return do_no_page(tsk, vma, address, write_access, pte);
return do_swap_page(tsk, vma, address, pte, entry, write_access);
} }
entry = pte_mkyoung(entry); entry = pte_mkyoung(entry);
set_pte(pte, entry); set_pte(pte, entry);
flush_tlb_page(vma, address); flush_tlb_page(vma, address);
if (!write_access) if (!write_access)
return; return 1;
if (pte_write(entry)) { if (pte_write(entry)) {
entry = pte_mkdirty(entry); entry = pte_mkdirty(entry);
set_pte(pte, entry); set_pte(pte, entry);
flush_tlb_page(vma, address); flush_tlb_page(vma, address);
return; return 1;
} }
do_wp_page(tsk, vma, address, pte); return do_wp_page(tsk, vma, address, pte);
} }
/* /*
* By the time we get here, we already hold the mm semaphore * By the time we get here, we already hold the mm semaphore
*/ */
void handle_mm_fault(struct task_struct *tsk, struct vm_area_struct * vma, int handle_mm_fault(struct task_struct *tsk, struct vm_area_struct * vma,
unsigned long address, int write_access) unsigned long address, int write_access)
{ {
pgd_t *pgd; pgd_t *pgd;
pmd_t *pmd; pmd_t *pmd;
pte_t *pte;
pgd = pgd_offset(vma->vm_mm, address); pgd = pgd_offset(vma->vm_mm, address);
pmd = pmd_alloc(pgd, address); pmd = pmd_alloc(pgd, address);
if (!pmd) if (pmd) {
goto no_memory; pte_t * pte = pte_alloc(pmd, address);
pte = pte_alloc(pmd, address); if (pte) {
if (!pte) if (handle_pte_fault(tsk, vma, address, write_access, pte)) {
goto no_memory;
lock_kernel();
handle_pte_fault(tsk, vma, address, write_access, pte);
unlock_kernel();
update_mmu_cache(vma, address, *pte); update_mmu_cache(vma, address, *pte);
return; return 1;
no_memory: }
oom(tsk); }
}
return 0;
} }
/* /*
......
...@@ -269,11 +269,16 @@ unsigned long __get_free_pages(int gfp_mask, unsigned long order) ...@@ -269,11 +269,16 @@ unsigned long __get_free_pages(int gfp_mask, unsigned long order)
/* /*
* If we failed to find anything, we'll return NULL, but we'll * If we failed to find anything, we'll return NULL, but we'll
* wake up kswapd _now_ and even wait for it synchronously if * wake up kswapd _now_ and even yield to it if we can..
* we can.. This way we'll at least make some forward progress * This way we'll at least make some forward progress
* over time. * over time.
*/ */
kswapd_notify(gfp_mask); wakeup_kswapd();
if (gfp_mask & __GFP_WAIT) {
current->policy |= SCHED_YIELD;
schedule();
}
nopage: nopage:
return 0; return 0;
} }
......
...@@ -118,8 +118,13 @@ static inline int try_to_swap_out(struct task_struct * tsk, struct vm_area_struc ...@@ -118,8 +118,13 @@ static inline int try_to_swap_out(struct task_struct * tsk, struct vm_area_struc
} }
if (pte_young(pte)) { if (pte_young(pte)) {
/*
* Transfer the "accessed" bit from the page
* tables to the global page map.
*/
set_pte(page_table, pte_mkold(pte)); set_pte(page_table, pte_mkold(pte));
touch_page(page_map); set_bit(PG_referenced, &page_map->flags);
/* /*
* We should test here to see if we want to recover any * We should test here to see if we want to recover any
* swap cache page here. We do this if the page seeing * swap cache page here. We do this if the page seeing
...@@ -132,10 +137,6 @@ static inline int try_to_swap_out(struct task_struct * tsk, struct vm_area_struc ...@@ -132,10 +137,6 @@ static inline int try_to_swap_out(struct task_struct * tsk, struct vm_area_struc
return 0; return 0;
} }
age_page(page_map);
if (page_map->age)
return 0;
if (pte_dirty(pte)) { if (pte_dirty(pte)) {
if (vma->vm_ops && vma->vm_ops->swapout) { if (vma->vm_ops && vma->vm_ops->swapout) {
pid_t pid = tsk->pid; pid_t pid = tsk->pid;
...@@ -305,8 +306,9 @@ static inline int swap_out_pgd(struct task_struct * tsk, struct vm_area_struct * ...@@ -305,8 +306,9 @@ static inline int swap_out_pgd(struct task_struct * tsk, struct vm_area_struct *
} }
static int swap_out_vma(struct task_struct * tsk, struct vm_area_struct * vma, static int swap_out_vma(struct task_struct * tsk, struct vm_area_struct * vma,
pgd_t *pgdir, unsigned long start, int gfp_mask) unsigned long address, int gfp_mask)
{ {
pgd_t *pgdir;
unsigned long end; unsigned long end;
/* Don't swap out areas like shared memory which have their /* Don't swap out areas like shared memory which have their
...@@ -314,12 +316,14 @@ static int swap_out_vma(struct task_struct * tsk, struct vm_area_struct * vma, ...@@ -314,12 +316,14 @@ static int swap_out_vma(struct task_struct * tsk, struct vm_area_struct * vma,
if (vma->vm_flags & (VM_SHM | VM_LOCKED)) if (vma->vm_flags & (VM_SHM | VM_LOCKED))
return 0; return 0;
pgdir = pgd_offset(tsk->mm, address);
end = vma->vm_end; end = vma->vm_end;
while (start < end) { while (address < end) {
int result = swap_out_pgd(tsk, vma, pgdir, start, end, gfp_mask); int result = swap_out_pgd(tsk, vma, pgdir, address, end, gfp_mask);
if (result) if (result)
return result; return result;
start = (start + PGDIR_SIZE) & PGDIR_MASK; address = (address + PGDIR_SIZE) & PGDIR_MASK;
pgdir++; pgdir++;
} }
return 0; return 0;
...@@ -339,15 +343,12 @@ static int swap_out_process(struct task_struct * p, int gfp_mask) ...@@ -339,15 +343,12 @@ static int swap_out_process(struct task_struct * p, int gfp_mask)
* Find the proper vm-area * Find the proper vm-area
*/ */
vma = find_vma(p->mm, address); vma = find_vma(p->mm, address);
if (!vma) { if (vma) {
p->swap_address = 0;
return 0;
}
if (address < vma->vm_start) if (address < vma->vm_start)
address = vma->vm_start; address = vma->vm_start;
for (;;) { for (;;) {
int result = swap_out_vma(p, vma, pgd_offset(p->mm, address), address, gfp_mask); int result = swap_out_vma(p, vma, address, gfp_mask);
if (result) if (result)
return result; return result;
vma = vma->vm_next; vma = vma->vm_next;
...@@ -355,6 +356,10 @@ static int swap_out_process(struct task_struct * p, int gfp_mask) ...@@ -355,6 +356,10 @@ static int swap_out_process(struct task_struct * p, int gfp_mask)
break; break;
address = vma->vm_start; address = vma->vm_start;
} }
}
/* We didn't find anything for the process */
p->swap_cnt = 0;
p->swap_address = 0; p->swap_address = 0;
return 0; return 0;
} }
...@@ -415,20 +420,12 @@ static int swap_out(unsigned int priority, int gfp_mask) ...@@ -415,20 +420,12 @@ static int swap_out(unsigned int priority, int gfp_mask)
} }
pbest->swap_cnt--; pbest->swap_cnt--;
switch (swap_out_process(pbest, gfp_mask)) {
case 0:
/* /*
* Clear swap_cnt so we don't look at this task * Nonzero means we cleared out something, but only "1" means
* again until we've tried all of the others. * that we actually free'd up a page as a result.
* (We didn't block, so the task is still here.)
*/ */
pbest->swap_cnt = 0; if (swap_out_process(pbest, gfp_mask) == 1)
break;
case 1:
return 1; return 1;
default:
break;
};
} }
out: out:
return 0; return 0;
...@@ -540,7 +537,7 @@ int kswapd(void *unused) ...@@ -540,7 +537,7 @@ int kswapd(void *unused)
init_swap_timer(); init_swap_timer();
kswapd_task = current; kswapd_task = current;
while (1) { while (1) {
int tries; unsigned long start_time;
current->state = TASK_INTERRUPTIBLE; current->state = TASK_INTERRUPTIBLE;
flush_signals(current); flush_signals(current);
...@@ -548,36 +545,12 @@ int kswapd(void *unused) ...@@ -548,36 +545,12 @@ int kswapd(void *unused)
schedule(); schedule();
swapstats.wakeups++; swapstats.wakeups++;
/* start_time = jiffies;
* Do the background pageout: be
* more aggressive if we're really
* low on free memory.
*
* We try page_daemon.tries_base times, divided by
* an 'urgency factor'. In practice this will mean
* a value of pager_daemon.tries_base / 8 or 4 = 64
* or 128 pages at a time.
* This gives us 64 (or 128) * 4k * 4 (times/sec) =
* 1 (or 2) MB/s swapping bandwidth in low-priority
* background paging. This number rises to 8 MB/s
* when the priority is highest (but then we'll be
* woken up more often and the rate will be even
* higher).
*/
tries = pager_daemon.tries_base;
tries >>= 4*free_memory_available();
do { do {
do_try_to_free_page(0); do_try_to_free_page(0);
/*
* Syncing large chunks is faster than swapping
* synchronously (less head movement). -- Rik.
*/
if (atomic_read(&nr_async_pages) >= pager_daemon.swap_cluster)
run_task_queue(&tq_disk);
if (free_memory_available() > 1) if (free_memory_available() > 1)
break; break;
} while (--tries > 0); } while (jiffies != start_time);
} }
/* As if we could ever get here - maybe we want to make this killable */ /* As if we could ever get here - maybe we want to make this killable */
kswapd_task = NULL; kswapd_task = NULL;
......
...@@ -53,6 +53,9 @@ ...@@ -53,6 +53,9 @@
# #
# 090398 Axel Boldt (boldt@math.ucsb.edu) - allow for empty lines in help # 090398 Axel Boldt (boldt@math.ucsb.edu) - allow for empty lines in help
# texts. # texts.
#
# 102598 Michael Chastain (mec@shout.net) - put temporary files in
# current directory, not in /tmp.
# #
# Make sure we're really running bash. # Make sure we're really running bash.
...@@ -506,9 +509,9 @@ if [ -f $DEFAULTS ]; then ...@@ -506,9 +509,9 @@ if [ -f $DEFAULTS ]; then
echo "# Using defaults found in" $DEFAULTS echo "# Using defaults found in" $DEFAULTS
echo "#" echo "#"
. $DEFAULTS . $DEFAULTS
sed -e 's/# \(.*\) is not.*/\1=n/' < $DEFAULTS > /tmp/conf.$$ sed -e 's/# \(.*\) is not.*/\1=n/' < $DEFAULTS > .config-is-not.$$
. /tmp/conf.$$ . .config-is-not.$$
rm /tmp/conf.$$ rm .config-is-not.$$
else else
echo "#" echo "#"
echo "# No defaults found" echo "# No defaults found"
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment