Commit 0c7f4371 authored Feb 18, 2002 by Linus Torvalds
Merge home.transmeta.com:/home/torvalds/v2.5/small-page
into home.transmeta.com:/home/torvalds/v2.5/linux
parents 991f6b0a e5191c50
Showing 27 changed files with 313 additions and 98 deletions (+313 -98)
drivers/char/agp/agpgart_be.c    +2   -2
drivers/char/drm/i810_dma.c      +7   -8
fs/buffer.c                      +3   -4
include/asm-alpha/pgtable.h      +0   -2
include/asm-arm/pgtable.h        +0   -1
include/asm-cris/pgtable.h       +0   -1
include/asm-i386/pgtable.h       +1   -5
include/asm-ia64/pgtable.h       +0   -5
include/asm-mips/pgtable.h       +0   -5
include/asm-mips64/pgtable.h     +0   -5
include/asm-parisc/pgtable.h     +0   -1
include/asm-ppc/pgtable.h        +0   -4
include/asm-s390/pgtable.h       +0   -4
include/asm-s390x/pgtable.h      +0   -4
include/asm-sh/pgtable.h         +0   -5
include/asm-sparc/pgtable.h      +0   -3
include/asm-sparc64/page.h       +3   -0
include/asm-sparc64/pgtable.h    +1   -2
include/asm-x86_64/pgtable.h     +0   -1
include/linux/mm.h               +74  -3
include/linux/mmzone.h           +34  -0
include/linux/pagemap.h          +2   -0
mm/Makefile                      +1   -1
mm/filemap.c                     +81  -9
mm/highmem.c                     +1   -1
mm/page_alloc.c                  +101 -20
mm/vmscan.c                      +2   -2
drivers/char/agp/agpgart_be.c

@@ -830,7 +830,7 @@ static void agp_generic_destroy_page(unsigned long addr)
 	page = virt_to_page(pt);
 	atomic_dec(&page->count);
 	clear_bit(PG_locked, &page->flags);
-	wake_up(&page->wait);
+	wake_up_page(page);
 	free_page((unsigned long) pt);
 	atomic_dec(&agp_bridge.current_memory_agp);
 }
@@ -2828,7 +2828,7 @@ static void ali_destroy_page(unsigned long addr)
 	page = virt_to_page(pt);
 	atomic_dec(&page->count);
 	clear_bit(PG_locked, &page->flags);
-	wake_up(&page->wait);
+	wake_up_page(page);
 	free_page((unsigned long) pt);
 	atomic_dec(&agp_bridge.current_memory_agp);
 }
drivers/char/drm/i810_dma.c

@@ -294,14 +294,13 @@ static unsigned long i810_alloc_page(drm_device_t *dev)
 static void i810_free_page(drm_device_t *dev, unsigned long page)
 {
-	if (page == 0UL)
-		return;
-
-	atomic_dec(&virt_to_page(page)->count);
-	clear_bit(PG_locked, &virt_to_page(page)->flags);
-	wake_up(&virt_to_page(page)->wait);
-	free_page(page);
-	return;
+	if (page) {
+		struct page *p = virt_to_page(page);
+		atomic_dec(&p->count);
+		clear_bit(PG_locked, &p->flags);
+		wake_up_page(p);
+		free_page(page);
+	}
 }
 
 static int i810_dma_cleanup(drm_device_t *dev)
fs/buffer.c

@@ -2115,8 +2115,7 @@ int generic_direct_IO(int rw, struct inode * inode, struct kiobuf * iobuf, unsig
  * of kiobuf structs (much like a user-space iovec list).
  *
  * The kiobuf must already be locked for IO.  IO is submitted
- * asynchronously: you need to check page->locked, page->uptodate, and
- * maybe wait on page->wait.
+ * asynchronously: you need to check page->locked and page->uptodate.
  *
  * It is up to the caller to make sure that there are enough blocks
  * passed in to completely map the iobufs to disk.
@@ -2173,8 +2172,8 @@ int brw_kiovec(int rw, int nr, struct kiobuf *iovec[], kdev_t dev, sector_t b[],
 /*
  * Start I/O on a page.
  * This function expects the page to be locked and may return
- * before I/O is complete. You then have to check page->locked,
- * page->uptodate, and maybe wait on page->wait.
+ * before I/O is complete. You then have to check page->locked
+ * and page->uptodate.
  *
  * brw_page() is SMP-safe, although it's being called with the
  * kernel lock held - but the code is ready.
include/asm-alpha/pgtable.h

@@ -268,8 +268,6 @@ extern inline int pgd_bad(pgd_t pgd) { return (pgd_val(pgd) & ~_PFN_MASK) != _P
 extern inline int pgd_present(pgd_t pgd)	{ return pgd_val(pgd) & _PAGE_VALID; }
 extern inline void pgd_clear(pgd_t * pgdp)	{ pgd_val(*pgdp) = 0; }
 
-#define page_address(page)	((page)->virtual)
-
 /*
  * The following only work if pte_present() is true.
  * Undefined behaviour if not..
include/asm-arm/pgtable.h

@@ -99,7 +99,6 @@ extern struct page *empty_zero_page;
 /*
  * Permanent address of a page. We never have highmem, so this is trivial.
  */
-#define page_address(page)	((page)->virtual)
 #define pages_to_mb(x)		((x) >> (20 - PAGE_SHIFT))
 
 /*
include/asm-cris/pgtable.h

@@ -439,7 +439,6 @@ static inline unsigned long __pte_page(pte_t pte)
 /* permanent address of a page */
 
-#define page_address(page)     ((page)->virtual)
 #define __page_address(page)   (PAGE_OFFSET + (((page) - mem_map) << PAGE_SHIFT))
 
 #define pte_page(pte)          (mem_map+pte_pagenr(pte))
include/asm-i386/pgtable.h

@@ -264,11 +264,7 @@ extern unsigned long pg0[1024];
 #define pmd_clear(xp)	do { set_pmd(xp, __pmd(0)); } while (0)
 #define pmd_bad(x)	((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
 
-/*
- * Permanent address of a page. Obviously must never be
- * called on a highmem page.
- */
-#define page_address(page) ((page)->virtual)
 #define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
 
 /*
include/asm-ia64/pgtable.h

@@ -165,11 +165,6 @@
  * addresses:
  */
 
-/*
- * Given a pointer to an mem_map[] entry, return the kernel virtual
- * address corresponding to that page.
- */
-#define page_address(page)	((page)->virtual)
-
 /* Quick test to see if ADDR is a (potentially) valid physical address. */
 static inline long
include/asm-mips/pgtable.h

@@ -331,11 +331,6 @@ extern inline int pgd_bad(pgd_t pgd) { return 0; }
 extern inline int pgd_present(pgd_t pgd)	{ return 1; }
 extern inline void pgd_clear(pgd_t *pgdp)	{ }
 
-/*
- * Permanent address of a page.  On MIPS we never have highmem, so this
- * is simple.
- */
-#define page_address(page)	((page)->virtual)
 #ifdef CONFIG_CPU_VR41XX
 #define pte_page(x)	(mem_map+(unsigned long)((pte_val(x) >> (PAGE_SHIFT + 2))))
 #else
include/asm-mips64/pgtable.h

@@ -370,11 +370,6 @@ extern inline void pgd_clear(pgd_t *pgdp)
 	pgd_val(*pgdp) = ((unsigned long) invalid_pmd_table);
 }
 
-/*
- * Permanent address of a page.  On MIPS64 we never have highmem, so this
- * is simple.
- */
-#define page_address(page)	((page)->virtual)
 #ifndef CONFIG_DISCONTIGMEM
 #define pte_page(x)		(mem_map+(unsigned long)((pte_val(x) >> PAGE_SHIFT)))
 #else
include/asm-parisc/pgtable.h

@@ -275,7 +275,6 @@ extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
  * Permanent address of a page. Obviously must never be
  * called on a highmem page.
  */
-#define page_address(page) ({ if (!(page)->virtual) BUG(); (page)->virtual; })
 #define __page_address(page) ({ if (PageHighMem(page)) BUG(); PAGE_OFFSET + (((page) - mem_map) << PAGE_SHIFT); })
 #define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
 
 #define pte_page(x) (mem_map+pte_pagenr(x))
include/asm-ppc/pgtable.h

@@ -389,10 +389,6 @@ extern unsigned long empty_zero_page[1024];
 #define pmd_present(pmd)	((pmd_val(pmd) & PAGE_MASK) != 0)
 #define pmd_clear(pmdp)		do { pmd_val(*(pmdp)) = 0; } while (0)
 
-/*
- * Permanent address of a page.
- */
-#define page_address(page)	((page)->virtual)
 #define pte_page(x)		(mem_map+(unsigned long)((pte_val(x)-PPC_MEMSTART) >> PAGE_SHIFT))
 
 #ifndef __ASSEMBLY__
include/asm-s390/pgtable.h

@@ -239,10 +239,6 @@ extern inline void set_pte(pte_t *pteptr, pte_t pteval)
 	*pteptr = pteval;
 }
 
-/*
- * Permanent address of a page.
- */
-#define page_address(page) ((page)->virtual)
 #define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
 
 /*
include/asm-s390x/pgtable.h

@@ -234,10 +234,6 @@ extern inline void set_pte(pte_t *pteptr, pte_t pteval)
 	*pteptr = pteval;
 }
 
-/*
- * Permanent address of a page.
- */
-#define page_address(page) ((page)->virtual)
 #define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
 
 /*
include/asm-sh/pgtable.h

@@ -208,11 +208,6 @@ extern unsigned long empty_zero_page[1024];
 #define pmd_clear(xp)	do { set_pmd(xp, __pmd(0)); } while (0)
 #define pmd_bad(x)	((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
 
-/*
- * Permanent address of a page. Obviously must never be
- * called on a highmem page.
- */
-#define page_address(page)	((page)->virtual)	/* P1 address of the page */
 #define pages_to_mb(x)	((x) >> (20-PAGE_SHIFT))
 #define pte_page(x) 	phys_to_page(pte_val(x)&PTE_PHYS_MASK)
include/asm-sparc/pgtable.h

@@ -293,9 +293,6 @@ BTFIXUPDEF_CALL_CONST(pte_t, pte_mkyoung, pte_t)
 #define page_pte_prot(page, prot)	mk_pte(page, prot)
 #define page_pte(page)			page_pte_prot(page, __pgprot(0))
 
-/* Permanent address of a page. */
-#define page_address(page)	((page)->virtual)
-
 BTFIXUPDEF_CALL(struct page *, pte_page, pte_t)
 #define pte_page(pte) BTFIXUP_CALL(pte_page)(pte)
include/asm-sparc64/page.h

@@ -30,6 +30,9 @@ extern void do_BUG(const char *file, int line);
 #define PAGE_BUG(page)	BUG()
 
+/* Sparc64 is slow at multiplication, we prefer to use some extra space. */
+#define WANT_PAGE_VIRTUAL	1
+
 extern void _clear_page(void *page);
 #define clear_page(X)	_clear_page((void *)(X))
 extern void clear_user_page(void *page, unsigned long vaddr);
include/asm-sparc64/pgtable.h

@@ -243,8 +243,7 @@ extern inline pte_t pte_modify(pte_t orig_pte, pgprot_t new_prot)
 #define pte_mkold(pte)		(__pte(((pte_val(pte)<<1UL)>>1UL) & ~_PAGE_ACCESSED))
 
 /* Permanent address of a page. */
-#define __page_address(page)	((page)->virtual)
-#define page_address(page)	({ __page_address(page); })
+#define __page_address(page)	page_address(page)
 
 #define pte_page(x) (mem_map+(((pte_val(x)&_PAGE_PADDR)-phys_base)>>PAGE_SHIFT))
include/asm-x86_64/pgtable.h

@@ -289,7 +289,6 @@ extern void __handle_bad_pmd_kernel(pmd_t * pmd);
  * Permanent address of a page. Obviously must never be
  * called on a highmem page.
  */
-#define page_address(page) ((page)->virtual)
 #define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
 
 /* FIXME: is this right? */
 #define pte_page(x)	(mem_map+((unsigned long)((pte_val(x) >> PAGE_SHIFT))))
include/linux/mm.h

@@ -157,12 +157,23 @@ typedef struct page {
 					  updated asynchronously */
 	struct list_head lru;		/* Pageout list, eg. active_list;
 					   protected by pagemap_lru_lock !! */
-	wait_queue_head_t wait;		/* Page locked?  Stand in line... */
 	struct page **pprev_hash;	/* Complement to *next_hash. */
 	struct buffer_head * buffers;	/* Buffer maps us to a disk block. */
+
+	/*
+	 * On machines where all RAM is mapped into kernel address space,
+	 * we can simply calculate the virtual address. On machines with
+	 * highmem some memory is mapped into kernel virtual memory
+	 * dynamically, so we need a place to store that address.
+	 * Note that this field could be 16 bits on x86 ... ;)
+	 *
+	 * Architectures with slow multiplication can define
+	 * WANT_PAGE_VIRTUAL in asm/page.h
+	 */
+#if defined(CONFIG_HIGHMEM) || defined(WANT_PAGE_VIRTUAL)
 	void *virtual;			/* Kernel virtual address (NULL if
 					   not kmapped, ie. highmem) */
-	struct zone_struct *zone;	/* Memory zone we are in. */
+#endif /* CONFIG_HIGMEM || WANT_PAGE_VIRTUAL */
 } mem_map_t;
@@ -183,6 +194,11 @@ typedef struct page {
 #define page_count(p)		atomic_read(&(p)->count)
 #define set_page_count(p,v) 	atomic_set(&(p)->count, v)
 
+static inline void init_page_count(struct page *page)
+{
+	page->count.counter = 0;
+}
+
 /*
  * Various page->flags bits:
  *
@@ -237,7 +253,7 @@ typedef struct page {
  * - private pages which have been modified may need to be swapped out
  *   to swap space and (later) to be read back into memory.
  * During disk I/O, PG_locked is used. This bit is set before I/O
- * and reset when I/O completes. page->wait is a wait queue of all
+ * and reset when I/O completes. page_waitqueue(page) is a wait queue of all
  *   tasks waiting for the I/O on this page to complete.
  * PG_uptodate tells whether the page's contents is valid.
  *   When a read completes, the page becomes uptodate, unless a disk I/O
@@ -299,6 +315,61 @@ typedef struct page {
 #define SetPageChecked(page)	set_bit(PG_checked, &(page)->flags)
 #define PageLaunder(page)	test_bit(PG_launder, &(page)->flags)
 #define SetPageLaunder(page)	set_bit(PG_launder, &(page)->flags)
+#define __SetPageReserved(page)	__set_bit(PG_reserved, &(page)->flags)
+
+/*
+ * The zone field is never updated after free_area_init_core()
+ * sets it, so none of the operations on it need to be atomic.
+ */
+#define NODE_SHIFT 4
+#define ZONE_SHIFT (BITS_PER_LONG - 8)
+
+struct zone_struct;
+extern struct zone_struct *zone_table[];
+
+static inline zone_t *page_zone(struct page *page)
+{
+	return zone_table[page->flags >> ZONE_SHIFT];
+}
+
+static inline void set_page_zone(struct page *page, unsigned long zone_num)
+{
+	page->flags &= ~(~0UL << ZONE_SHIFT);
+	page->flags |= zone_num << ZONE_SHIFT;
+}
+
+/*
+ * In order to avoid #ifdefs within C code itself, we define
+ * set_page_address to a noop for non-highmem machines, where
+ * the field isn't useful.
+ * The same is true for page_address() in arch-dependent code.
+ */
+#if defined(CONFIG_HIGHMEM) || defined(WANT_PAGE_VIRTUAL)
+#define set_page_address(page, address)			\
+	do {						\
+		(page)->virtual = (address);		\
+	} while(0)
+#else /* CONFIG_HIGHMEM || WANT_PAGE_VIRTUAL */
+#define set_page_address(page, address)  do { } while(0)
+#endif /* CONFIG_HIGHMEM || WANT_PAGE_VIRTUAL */
+
+/*
+ * Permanent address of a page. Obviously must never be
+ * called on a highmem page.
+ */
+#if defined(CONFIG_HIGHMEM) || defined(WANT_PAGE_VIRTUAL)
+#define page_address(page) ((page)->virtual)
+#else /* CONFIG_HIGHMEM || WANT_PAGE_VIRTUAL */
+#define page_address(page)						\
+	__va( (((page) - page_zone(page)->zone_mem_map) << PAGE_SHIFT)	\
+			+ page_zone(page)->zone_start_paddr)
+#endif /* CONFIG_HIGHMEM || WANT_PAGE_VIRTUAL */
+
 extern void FASTCALL(set_page_dirty(struct page *));
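A note on the page_zone()/set_page_zone() pair above: it replaces the old page->zone pointer with an 8-bit zone index kept in the top bits of page->flags, and the non-highmem page_address() recomputes the kernel virtual address from that zone instead of reading a stored pointer. Below is a minimal user-space sketch of the same packing scheme; the string table stands in for the kernel's zone_table[] and the sample flag bits are made up, but the bit manipulation mirrors the hunk above.

#include <stdio.h>

#define BITS_PER_LONG	(8 * sizeof(unsigned long))
#define ZONE_SHIFT	(BITS_PER_LONG - 8)

/* Stand-in for the kernel's zone_table[]: zone index -> zone. */
static const char *zone_table[] = { "DMA", "Normal", "HighMem" };

/* Same bit manipulation as set_page_zone()/page_zone() in the hunk above. */
static void set_page_zone(unsigned long *flags, unsigned long zone_num)
{
	*flags &= ~(~0UL << ZONE_SHIFT);	/* clear the old zone index */
	*flags |= zone_num << ZONE_SHIFT;	/* store the new one */
}

static const char *page_zone(unsigned long flags)
{
	return zone_table[flags >> ZONE_SHIFT];
}

int main(void)
{
	unsigned long flags = 0x5;	/* pretend a few PG_* bits are set */

	set_page_zone(&flags, 2);	/* put the page in "HighMem" */
	printf("zone = %s, low flag bits still 0x%lx\n",
	       page_zone(flags), flags & ((1UL << ZONE_SHIFT) - 1));
	return 0;
}

Because free_area_init_core() writes the zone index exactly once at boot and nothing updates it afterwards, readers need no locking or atomic bit operations, just a shift and an array lookup.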
include/linux/mmzone.h

@@ -7,6 +7,7 @@
 #include <linux/config.h>
 #include <linux/spinlock.h>
 #include <linux/list.h>
+#include <linux/wait.h>
 
 /*
  * Free memory management - zoned buddy allocator.
@@ -47,6 +48,35 @@ typedef struct zone_struct {
 	 */
 	free_area_t		free_area[MAX_ORDER];
 
+	/*
+	 * wait_table		-- the array holding the hash table
+	 * wait_table_size	-- the size of the hash table array
+	 * wait_table_shift	-- wait_table_size
+	 * 				== BITS_PER_LONG (1 << wait_table_bits)
+	 *
+	 * The purpose of all these is to keep track of the people
+	 * waiting for a page to become available and make them
+	 * runnable again when possible. The trouble is that this
+	 * consumes a lot of space, especially when so few things
+	 * wait on pages at a given time. So instead of using
+	 * per-page waitqueues, we use a waitqueue hash table.
+	 *
+	 * The bucket discipline is to sleep on the same queue when
+	 * colliding and wake all in that wait queue when removing.
+	 * When something wakes, it must check to be sure its page is
+	 * truly available, a la thundering herd. The cost of a
+	 * collision is great, but given the expected load of the
+	 * table, they should be so rare as to be outweighed by the
+	 * benefits from the saved space.
+	 *
+	 * __wait_on_page() and unlock_page() in mm/filemap.c, are the
+	 * primary users of these fields, and in mm/page_alloc.c
+	 * free_area_init_core() performs the initialization of them.
+	 */
+	wait_queue_head_t	* wait_table;
+	unsigned long		wait_table_size;
+	unsigned long		wait_table_shift;
+
 	/*
 	 * Discontig memory support fields.
 	 */
@@ -132,11 +162,15 @@ extern pg_data_t contig_page_data;
 #define NODE_DATA(nid)		(&contig_page_data)
 #define NODE_MEM_MAP(nid)	mem_map
 #define MAX_NR_NODES		1
 
 #else /* !CONFIG_DISCONTIGMEM */
 
 #include <asm/mmzone.h>
 
+/* page->zone is currently 8 bits ... */
+#define MAX_NR_NODES		(255 / MAX_NR_ZONES)
+
 #endif /* !CONFIG_DISCONTIGMEM */
 
 #define MAP_ALIGN(x)	((((x) % sizeof(mem_map_t)) == 0) ? (x) : ((x) + \
include/linux/pagemap.h

@@ -97,6 +97,8 @@ static inline void wait_on_page(struct page * page)
 		___wait_on_page(page);
 }
 
+extern void wake_up_page(struct page *);
+
 extern struct page * grab_cache_page (struct address_space *, unsigned long);
 extern struct page * grab_cache_page_nowait (struct address_space *, unsigned long);
mm/Makefile

@@ -9,7 +9,7 @@
 O_TARGET := mm.o
 
-export-objs := shmem.o filemap.o mempool.o
+export-objs := shmem.o filemap.o mempool.o page_alloc.o
 
 obj-y	 := memory.o mmap.o filemap.o mprotect.o mlock.o mremap.o \
 	    vmalloc.o slab.o bootmem.o swap.o vmscan.o page_io.o \
mm/filemap.c

@@ -740,6 +740,67 @@ static int read_cluster_nonblocking(struct file * file, unsigned long offset,
 	return 0;
 }
 
+/*
+ * Knuth recommends primes in approximately golden ratio to the maximum
+ * integer representable by a machine word for multiplicative hashing.
+ * Chuck Lever verified the effectiveness of this technique:
+ * http://www.citi.umich.edu/techreports/reports/citi-tr-00-1.pdf
+ *
+ * These primes are chosen to be bit-sparse, that is operations on
+ * them can use shifts and additions instead of multiplications for
+ * machines where multiplications are slow.
+ */
+#if BITS_PER_LONG == 32
+/* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */
+#define GOLDEN_RATIO_PRIME 0x9e370001UL
+#elif BITS_PER_LONG == 64
+/*  2^63 + 2^61 - 2^57 + 2^54 - 2^51 - 2^18 + 1 */
+#define GOLDEN_RATIO_PRIME 0x9e37fffffffc0001UL
+#else
+#error Define GOLDEN_RATIO_PRIME for your wordsize.
+#endif
+
+/*
+ * In order to wait for pages to become available there must be
+ * waitqueues associated with pages. By using a hash table of
+ * waitqueues where the bucket discipline is to maintain all
+ * waiters on the same queue and wake all when any of the pages
+ * become available, and for the woken contexts to check to be
+ * sure the appropriate page became available, this saves space
+ * at a cost of "thundering herd" phenomena during rare hash
+ * collisions.
+ */
+static inline wait_queue_head_t *page_waitqueue(struct page *page)
+{
+	const zone_t *zone = page_zone(page);
+	wait_queue_head_t *wait = zone->wait_table;
+	unsigned long hash = (unsigned long)page;
+
+#if BITS_PER_LONG == 64
+	/*  Sigh, gcc can't optimise this alone like it does for 32 bits. */
+	unsigned long n = hash;
+	n <<= 18;
+	hash -= n;
+	n <<= 33;
+	hash -= n;
+	n <<= 3;
+	hash += n;
+	n <<= 3;
+	hash -= n;
+	n <<= 4;
+	hash += n;
+	n <<= 2;
+	hash += n;
+#else
+	/* On some cpus multiply is faster, on others gcc will do shifts */
+	hash *= GOLDEN_RATIO_PRIME;
+#endif
+	hash >>= zone->wait_table_shift;
+
+	return &wait[hash];
+}
+
 /*
  * Wait for a page to get unlocked.
  *
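The 64-bit branch of page_waitqueue() above open-codes the multiplication because, per its comment, the compiler of the day would not break it into shifts by itself. The user-space check below confirms, for one sample value, that the shift/add sequence equals multiplication by the 64-bit GOLDEN_RATIO_PRIME modulo 2^64, and shows the bucket index being taken from the high bits of the product. The sample pointer value and the 56-bit shift are made-up stand-ins for a struct page address and a typical zone->wait_table_shift; they are not values from the patch.

#include <stdio.h>
#include <stdint.h>

/* 2^63 + 2^61 - 2^57 + 2^54 - 2^51 - 2^18 + 1, as in the hunk above */
#define GOLDEN_RATIO_PRIME_64 0x9e37fffffffc0001ULL

/* The shift/add sequence from page_waitqueue(), 64-bit variant. */
static uint64_t hash64_shifts(uint64_t hash)
{
	uint64_t n = hash;

	n <<= 18; hash -= n;	/* hash = h*(1 - 2^18)                */
	n <<= 33; hash -= n;	/* ...        - h*2^51                */
	n <<= 3;  hash += n;	/* ...        + h*2^54                */
	n <<= 3;  hash -= n;	/* ...        - h*2^57                */
	n <<= 4;  hash += n;	/* ...        + h*2^61                */
	n <<= 2;  hash += n;	/* ...        + h*2^63 (all mod 2^64) */
	return hash;
}

int main(void)
{
	uint64_t page = 0xffff810004321a80ULL;	/* made-up struct page address */
	uint64_t a = hash64_shifts(page);
	uint64_t b = page * GOLDEN_RATIO_PRIME_64;
	unsigned int shift = 56;		/* stand-in for wait_table_shift */

	printf("shifts: %016llx  multiply: %016llx  %s\n",
	       (unsigned long long)a, (unsigned long long)b,
	       a == b ? "match" : "MISMATCH");
	printf("bucket index: %llu\n", (unsigned long long)(a >> shift));
	return 0;
}

Shifting by zone->wait_table_shift keeps only the top bits of the product, which is where a multiplicative hash mixes its input best; that is also why a bit-sparse prime is good enough here.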
@@ -749,10 +810,11 @@ static int read_cluster_nonblocking(struct file * file, unsigned long offset,
  */
 void ___wait_on_page(struct page *page)
 {
+	wait_queue_head_t *waitqueue = page_waitqueue(page);
 	struct task_struct *tsk = current;
 	DECLARE_WAITQUEUE(wait, tsk);
 
-	add_wait_queue(&page->wait, &wait);
+	add_wait_queue(waitqueue, &wait);
 	do {
 		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
 		if (!PageLocked(page))
@@ -760,19 +822,23 @@ void ___wait_on_page(struct page *page)
 		sync_page(page);
 		schedule();
 	} while (PageLocked(page));
-	tsk->state = TASK_RUNNING;
-	remove_wait_queue(&page->wait, &wait);
+	__set_task_state(tsk, TASK_RUNNING);
+	remove_wait_queue(waitqueue, &wait);
 }
 
 /*
  * Unlock the page and wake up sleepers in ___wait_on_page.
  */
 void unlock_page(struct page *page)
 {
+	wait_queue_head_t *waitqueue = page_waitqueue(page);
 	clear_bit(PG_launder, &(page)->flags);
 	smp_mb__before_clear_bit();
 	if (!test_and_clear_bit(PG_locked, &(page)->flags))
 		BUG();
 	smp_mb__after_clear_bit();
-	if (waitqueue_active(&(page)->wait))
-		wake_up(&(page)->wait);
+	if (waitqueue_active(waitqueue))
+		wake_up_all(waitqueue);
 }
 
 /*
@@ -781,10 +847,11 @@ void unlock_page(struct page *page)
  */
 static void __lock_page(struct page *page)
 {
+	wait_queue_head_t *waitqueue = page_waitqueue(page);
 	struct task_struct *tsk = current;
 	DECLARE_WAITQUEUE(wait, tsk);
 
-	add_wait_queue_exclusive(&page->wait, &wait);
+	add_wait_queue_exclusive(waitqueue, &wait);
 	for (;;) {
 		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
 		if (PageLocked(page)) {
@@ -794,10 +861,15 @@ static void __lock_page(struct page *page)
 		if (!TryLockPage(page))
 			break;
 	}
-	tsk->state = TASK_RUNNING;
-	remove_wait_queue(&page->wait, &wait);
+	__set_task_state(tsk, TASK_RUNNING);
+	remove_wait_queue(waitqueue, &wait);
 }
 
+void wake_up_page(struct page *page)
+{
+	wake_up(page_waitqueue(page));
+}
+EXPORT_SYMBOL(wake_up_page);
+
 /*
  * Get an exclusive lock on the page, optimistically
mm/highmem.c

@@ -379,7 +379,7 @@ void create_bounce(unsigned long pfn, int gfp, struct bio **bio_orig)
 		/*
 		 * is destination page below bounce pfn?
 		 */
-		if ((page - page->zone->zone_mem_map) + (page->zone->zone_start_paddr >> PAGE_SHIFT) < pfn)
+		if ((page - page_zone(page)->zone_mem_map) + (page_zone(page)->zone_start_paddr >> PAGE_SHIFT) < pfn)
 			continue;
 
 		/*
mm/page_alloc.c

 /*
  *  linux/mm/page_alloc.c
  *
  *  Manages the free list, the system allocates free pages here.
  *  Note that kmalloc() lives in slab.c
  *
  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
  *  Swap reorganised 29.12.95, Stephen Tweedie
  *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 
@@ -18,6 +21,7 @@
 #include <linux/bootmem.h>
 #include <linux/slab.h>
 #include <linux/compiler.h>
+#include <linux/module.h>
 
 int nr_swap_pages;
 int nr_active_pages;
@@ -26,6 +30,10 @@ struct list_head inactive_list;
 struct list_head active_list;
 pg_data_t *pgdat_list;
 
+/* Used to look up the address of the struct zone encoded in page->zone */
+zone_t *zone_table[MAX_NR_ZONES*MAX_NR_NODES];
+EXPORT_SYMBOL(zone_table);
+
 static char *zone_names[MAX_NR_ZONES] = { "DMA", "Normal", "HighMem" };
 static int zone_balance_ratio[MAX_NR_ZONES] __initdata = { 128, 128, 128, };
 static int zone_balance_min[MAX_NR_ZONES] __initdata = { 20 , 20, 20, };
@@ -54,12 +62,31 @@ static int zone_balance_max[MAX_NR_ZONES] __initdata = { 255 , 255, 255, };
 /*
  * Temporary debugging check.
  */
-#define BAD_RANGE(zone,x) (((zone) != (x)->zone) || (((x)-mem_map) < (zone)->zone_start_mapnr) || (((x)-mem_map) >= (zone)->zone_start_mapnr+(zone)->size))
+#define BAD_RANGE(zone, page)						\
+(									\
+	(((page) - mem_map) >= ((zone)->zone_start_mapnr+(zone)->size))	\
+	|| (((page) - mem_map) < (zone)->zone_start_mapnr)		\
+	|| ((zone) != page_zone(page))					\
+)
 
 /*
- * Buddy system. Hairy. You really aren't expected to understand this
+ * Freeing function for a buddy system allocator.
+ *
+ * The concept of a buddy system is to maintain direct-mapped table
+ * (containing bit values) for memory blocks of various "orders".
+ * The bottom level table contains the map for the smallest allocatable
+ * units of memory (here, pages), and each level above it describes
+ * pairs of units from the levels below, hence, "buddies".
+ * At a high level, all that happens here is marking the table entry
+ * at the bottom level available, and propagating the changes upward
+ * as necessary, plus some accounting needed to play nicely with other
+ * parts of the VM system.
+ *
+ * TODO: give references to descriptions of buddy system allocators,
+ * describe precisely the silly trick buddy allocators use to avoid
+ * storing an extra bit, utilizing entry point information.
+ *
+ * Hint: -mask = 1+~mask
+ * -- wli
  */
 static void FASTCALL(__free_pages_ok (struct page *page, unsigned int order));
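The "Hint: -mask = 1+~mask" line refers to the buddy computation a few hunks further down: with mask = (~0UL) << order, the two's-complement negation -mask equals 1UL << order, so page_idx ^ -mask flips exactly the bit numbered "order" and yields the index of the buddy block. A tiny stand-alone check of that identity follows; the example index is arbitrary and not taken from the patch.

#include <stdio.h>

int main(void)
{
	unsigned int order;

	for (order = 0; order < 4; order++) {
		unsigned long mask = (~0UL) << order;
		unsigned long page_idx = 0x28;		/* example block index */

		/* -mask == 1+~mask == 1UL << order, so the XOR flips bit 'order' */
		unsigned long buddy_idx = page_idx ^ -mask;

		printf("order %u: -mask = %#lx, buddy of %#lx is %#lx\n",
		       order, -mask, page_idx, buddy_idx);
	}
	return 0;
}

In __free_pages_ok() the index is already aligned to the current order, so the XOR always toggles between the two halves of a 2^(order+1)-page pair.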
@@ -90,7 +117,7 @@ static void __free_pages_ok (struct page *page, unsigned int order)
 		goto local_freelist;
 back_local_freelist:
 
-	zone = page->zone;
+	zone = page_zone(page);
 
 	mask = (~0UL) << order;
 	base = zone->zone_mem_map;
@@ -117,6 +144,8 @@ static void __free_pages_ok (struct page *page, unsigned int order)
 			break;
 		/*
 		 * Move the buddy up one level.
+		 * This code is taking advantage of the identity:
+		 * 	-mask = 1+~mask
 		 */
 		buddy1 = base + (page_idx ^ -mask);
 		buddy2 = base + page_idx;
@@ -255,7 +284,7 @@ static struct page * balance_classzone(zone_t * classzone, unsigned int gfp_mask
 			entry = local_pages->next;
 			do {
 				tmp = list_entry(entry, struct page, list);
-				if (tmp->index == order && memclass(tmp->zone, classzone)) {
+				if (tmp->index == order && memclass(page_zone(tmp), classzone)) {
 					list_del(entry);
 					current->nr_local_pages--;
 					set_page_count(tmp, 1);
@@ -625,6 +654,48 @@ static inline void build_zonelists(pg_data_t *pgdat)
 	}
 }
 
+/*
+ * Helper functions to size the waitqueue hash table.
+ * Essentially these want to choose hash table sizes sufficiently
+ * large so that collisions trying to wait on pages are rare.
+ * But in fact, the number of active page waitqueues on typical
+ * systems is ridiculously low, less than 200. So this is even
+ * conservative, even though it seems large.
+ *
+ * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to
+ * waitqueues, i.e. the size of the waitq table given the number of pages.
+ */
+#define PAGES_PER_WAITQUEUE	256
+
+static inline unsigned long wait_table_size(unsigned long pages)
+{
+	unsigned long size = 1;
+
+	pages /= PAGES_PER_WAITQUEUE;
+
+	while (size < pages)
+		size <<= 1;
+
+	/*
+	 * Once we have dozens or even hundreds of threads sleeping
+	 * on IO we've got bigger problems than wait queue collision.
+	 * Limit the size of the wait table to a reasonable size.
+	 */
+	size = min(size, 4096UL);
+
+	return size;
+}
+
+/*
+ * This is an integer logarithm so that shifts can be used later
+ * to extract the more random high bits from the multiplicative
+ * hash function before the remainder is taken.
+ */
+static inline unsigned long wait_table_bits(unsigned long size)
+{
+	return ffz(~size);
+}
+
 #define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
 
 /*
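A worked example of the two sizing helpers above, pulled out into user space: a zone of 131072 pages (512 MB of 4 KB pages, an assumed figure rather than one from the patch) ends up with 512 hash buckets, and wait_table_bits() is simply log2 of that power of two, computed below with a portable loop in place of the kernel's ffz(~size).

#include <stdio.h>

#define PAGES_PER_WAITQUEUE 256

static unsigned long wait_table_size(unsigned long pages)
{
	unsigned long size = 1;

	pages /= PAGES_PER_WAITQUEUE;
	while (size < pages)
		size <<= 1;

	/* cap the table, as the kernel does with min(size, 4096UL) */
	if (size > 4096UL)
		size = 4096UL;
	return size;
}

/* log2 of a power of two; the kernel computes this as ffz(~size) */
static unsigned long wait_table_bits(unsigned long size)
{
	unsigned long bits = 0;

	while ((1UL << bits) < size)
		bits++;
	return bits;
}

int main(void)
{
	unsigned long pages = 131072;	/* assumed zone size: 512 MB of 4 KB pages */
	unsigned long size = wait_table_size(pages);
	unsigned long bits = wait_table_bits(size);
	unsigned long bits_per_long = 8 * sizeof(unsigned long);

	printf("zone pages: %lu -> wait_table_size: %lu, bits: %lu, shift: %lu\n",
	       pages, size, bits, bits_per_long - bits);
	return 0;
}

free_area_init_core() then stores BITS_PER_LONG minus this bit count in zone->wait_table_shift, which is exactly the shift page_waitqueue() applies to keep only the high bits of its hash.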
@@ -637,7 +708,6 @@ void __init free_area_init_core(int nid, pg_data_t *pgdat, struct page **gmap,
 	unsigned long *zones_size, unsigned long zone_start_paddr,
 	unsigned long *zholes_size, struct page *lmem_map)
 {
-	struct page *p;
 	unsigned long i, j;
 	unsigned long map_size;
 	unsigned long totalpages, offset, realtotalpages;
@@ -680,24 +750,13 @@ void __init free_area_init_core(int nid, pg_data_t *pgdat, struct page **gmap,
 	pgdat->node_start_mapnr = (lmem_map - mem_map);
 	pgdat->nr_zones = 0;
 
-	/*
-	 * Initially all pages are reserved - free ones are freed
-	 * up by free_all_bootmem() once the early boot process is
-	 * done.
-	 */
-	for (p = lmem_map; p < lmem_map + totalpages; p++) {
-		set_page_count(p, 0);
-		SetPageReserved(p);
-		init_waitqueue_head(&p->wait);
-		memlist_init(&p->list);
-	}
-
 	offset = lmem_map - mem_map;
 	for (j = 0; j < MAX_NR_ZONES; j++) {
 		zone_t *zone = pgdat->node_zones + j;
 		unsigned long mask;
 		unsigned long size, realsize;
 
+		zone_table[nid * MAX_NR_ZONES + j] = zone;
 		realsize = size = zones_size[j];
 		if (zholes_size)
 			realsize -= zholes_size[j];
@@ -712,6 +771,20 @@ void __init free_area_init_core(int nid, pg_data_t *pgdat, struct page **gmap,
 		if (!size)
 			continue;
 
+		/*
+		 * The per-page waitqueue mechanism uses hashed waitqueues
+		 * per zone.
+		 */
+		zone->wait_table_size = wait_table_size(size);
+		zone->wait_table_shift =
+			BITS_PER_LONG - wait_table_bits(zone->wait_table_size);
+		zone->wait_table = (wait_queue_head_t *)
+			alloc_bootmem_node(pgdat, zone->wait_table_size
+						* sizeof(wait_queue_head_t));
+
+		for(i = 0; i < zone->wait_table_size; ++i)
+			init_waitqueue_head(zone->wait_table + i);
+
 		pgdat->nr_zones = j+1;
 
 		mask = (realsize / zone_balance_ratio[j]);
@@ -730,11 +803,19 @@ void __init free_area_init_core(int nid, pg_data_t *pgdat, struct page **gmap,
 		if ((zone_start_paddr >> PAGE_SHIFT) & (zone_required_alignment-1))
 			printk("BUG: wrong zone alignment, it will crash\n");
 
+		/*
+		 * Initially all pages are reserved - free ones are freed
+		 * up by free_all_bootmem() once the early boot process is
+		 * done. Non-atomic initialization, single-pass.
+		 */
 		for (i = 0; i < size; i++) {
 			struct page *page = mem_map + offset + i;
-			page->zone = zone;
+			set_page_zone(page, nid * MAX_NR_ZONES + j);
+			init_page_count(page);
+			__SetPageReserved(page);
+			memlist_init(&page->list);
 			if (j != ZONE_HIGHMEM)
-				page->virtual = __va(zone_start_paddr);
+				set_page_address(page, __va(zone_start_paddr));
 			zone_start_paddr += PAGE_SIZE;
 		}
mm/vmscan.c

@@ -59,7 +59,7 @@ static inline int try_to_swap_out(struct mm_struct * mm, struct vm_area_struct*
 		return 0;
 
 	/* Don't bother replenishing zones not under pressure.. */
-	if (!memclass(page->zone, classzone))
+	if (!memclass(page_zone(page), classzone))
 		return 0;
 
 	if (TryLockPage(page))
@@ -372,7 +372,7 @@ static int shrink_cache(int nr_pages, zone_t * classzone, unsigned int gfp_mask,
 		if (unlikely(!page_count(page)))
 			continue;
 
-		if (!memclass(page->zone, classzone))
+		if (!memclass(page_zone(page), classzone))
 			continue;
 
 		/* Racy check to avoid trylocking when not worthwhile */