Commit 8745808f authored by Michal Hocko's avatar Michal Hocko Committed by Linus Torvalds

mm, arch: remove empty_bad_page*

empty_bad_page() and empty_bad_pte_table() seem to be relics from old
days which is not used by any code for a long time.  I have tried to
find when exactly but this is not really all that straightforward due to
many code movements - the traces disappear back around the 2.4 era.

Anyway, no code really references either empty_bad_page or
empty_bad_pte_table.  We only allocate the storage, which is not used by
anybody, so remove them.

Link: http://lkml.kernel.org/r/20171004150045.30755-1-mhocko@kernel.org
Signed-off-by: default avatarMichal Hocko <mhocko@suse.com>
Acked-by: default avatarRalf Baechle <ralf@linus-mips.org>
Acked-by: default avatarIngo Molnar <mingo@kernel.org>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Cc: David Howells <dhowells@redhat.com>
Cc: Rich Felker <dalias@libc.org>
Cc: Jeff Dike <jdike@addtoit.com>
Cc: Richard Weinberger <richard@nod.at>
Signed-off-by: default avatarAndrew Morton <akpm@linux-foundation.org>
Signed-off-by: default avatarLinus Torvalds <torvalds@linux-foundation.org>
parent a2e16731
...@@ -42,21 +42,9 @@ ...@@ -42,21 +42,9 @@
#undef DEBUG #undef DEBUG
/* /*
* BAD_PAGE is the page that is used for page faults when linux
* is out-of-memory. Older versions of linux just did a
* do_exit(), but using this instead means there is less risk
* for a process dying in kernel mode, possibly leaving a inode
* unused etc..
*
* BAD_PAGETABLE is the accompanying page-table: it is initialized
* to point to BAD_PAGE entries.
*
* ZERO_PAGE is a special page that is used for zero-initialized * ZERO_PAGE is a special page that is used for zero-initialized
* data and COW. * data and COW.
*/ */
static unsigned long empty_bad_page_table;
static unsigned long empty_bad_page;
unsigned long empty_zero_page; unsigned long empty_zero_page;
EXPORT_SYMBOL(empty_zero_page); EXPORT_SYMBOL(empty_zero_page);
...@@ -72,8 +60,6 @@ void __init paging_init(void) ...@@ -72,8 +60,6 @@ void __init paging_init(void)
unsigned long zones_size[MAX_NR_ZONES] = {0, }; unsigned long zones_size[MAX_NR_ZONES] = {0, };
/* allocate some pages for kernel housekeeping tasks */ /* allocate some pages for kernel housekeeping tasks */
empty_bad_page_table = (unsigned long) alloc_bootmem_pages(PAGE_SIZE);
empty_bad_page = (unsigned long) alloc_bootmem_pages(PAGE_SIZE);
empty_zero_page = (unsigned long) alloc_bootmem_pages(PAGE_SIZE); empty_zero_page = (unsigned long) alloc_bootmem_pages(PAGE_SIZE);
memset((void *) empty_zero_page, 0, PAGE_SIZE); memset((void *) empty_zero_page, 0, PAGE_SIZE);
......
...@@ -40,20 +40,9 @@ ...@@ -40,20 +40,9 @@
#include <asm/sections.h> #include <asm/sections.h>
/* /*
* BAD_PAGE is the page that is used for page faults when linux
* is out-of-memory. Older versions of linux just did a
* do_exit(), but using this instead means there is less risk
* for a process dying in kernel mode, possibly leaving a inode
* unused etc..
*
* BAD_PAGETABLE is the accompanying page-table: it is initialized
* to point to BAD_PAGE entries.
*
* ZERO_PAGE is a special page that is used for zero-initialized * ZERO_PAGE is a special page that is used for zero-initialized
* data and COW. * data and COW.
*/ */
static unsigned long empty_bad_page_table;
static unsigned long empty_bad_page;
unsigned long empty_zero_page; unsigned long empty_zero_page;
/* /*
...@@ -78,8 +67,6 @@ void __init paging_init(void) ...@@ -78,8 +67,6 @@ void __init paging_init(void)
* Initialize the bad page table and bad page to point * Initialize the bad page table and bad page to point
* to a couple of allocated pages. * to a couple of allocated pages.
*/ */
empty_bad_page_table = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
empty_bad_page = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
empty_zero_page = (unsigned long)alloc_bootmem_pages(PAGE_SIZE); empty_zero_page = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
memset((void *)empty_zero_page, 0, PAGE_SIZE); memset((void *)empty_zero_page, 0, PAGE_SIZE);
......
...@@ -31,12 +31,7 @@ ...@@ -31,12 +31,7 @@
* tables. Each page table is also a single 4K page, giving 512 (== * tables. Each page table is also a single 4K page, giving 512 (==
* PTRS_PER_PTE) 8 byte ptes. Each pud entry is initialized to point to * PTRS_PER_PTE) 8 byte ptes. Each pud entry is initialized to point to
* invalid_pmd_table, each pmd entry is initialized to point to * invalid_pmd_table, each pmd entry is initialized to point to
* invalid_pte_table, each pte is initialized to 0. When memory is low, * invalid_pte_table, each pte is initialized to 0.
* and a pmd table or a page table allocation fails, empty_bad_pmd_table
* and empty_bad_page_table is returned back to higher layer code, so
* that the failure is recognized later on. Linux does not seem to
* handle these failures very well though. The empty_bad_page_table has
* invalid pte entries in it, to force page faults.
* *
* Kernel mappings: kernel mappings are held in the swapper_pg_table. * Kernel mappings: kernel mappings are held in the swapper_pg_table.
* The layout is identical to userspace except it's indexed with the * The layout is identical to userspace except it's indexed with the
...@@ -175,7 +170,6 @@ ...@@ -175,7 +170,6 @@
printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e)) printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))
extern pte_t invalid_pte_table[PTRS_PER_PTE]; extern pte_t invalid_pte_table[PTRS_PER_PTE];
extern pte_t empty_bad_page_table[PTRS_PER_PTE];
#ifndef __PAGETABLE_PUD_FOLDED #ifndef __PAGETABLE_PUD_FOLDED
/* /*
......
...@@ -433,14 +433,6 @@ ENTRY(swapper_pg_dir) ...@@ -433,14 +433,6 @@ ENTRY(swapper_pg_dir)
ENTRY(empty_zero_page) ENTRY(empty_zero_page)
.space PAGE_SIZE .space PAGE_SIZE
.balign PAGE_SIZE
ENTRY(empty_bad_page)
.space PAGE_SIZE
.balign PAGE_SIZE
ENTRY(empty_bad_pte_table)
.space PAGE_SIZE
.balign PAGE_SIZE .balign PAGE_SIZE
ENTRY(large_page_table) ENTRY(large_page_table)
.space PAGE_SIZE .space PAGE_SIZE
......
...@@ -101,14 +101,6 @@ empty_zero_page: ...@@ -101,14 +101,6 @@ empty_zero_page:
mmu_pdtp_cache: mmu_pdtp_cache:
.space PAGE_SIZE, 0 .space PAGE_SIZE, 0
.global empty_bad_page
empty_bad_page:
.space PAGE_SIZE, 0
.global empty_bad_pte_table
empty_bad_pte_table:
.space PAGE_SIZE, 0
.global fpu_in_use .global fpu_in_use
fpu_in_use: .quad 0 fpu_in_use: .quad 0
......
...@@ -22,8 +22,6 @@ ...@@ -22,8 +22,6 @@
/* allocated in paging_init, zeroed in mem_init, and unchanged thereafter */ /* allocated in paging_init, zeroed in mem_init, and unchanged thereafter */
unsigned long *empty_zero_page = NULL; unsigned long *empty_zero_page = NULL;
EXPORT_SYMBOL(empty_zero_page); EXPORT_SYMBOL(empty_zero_page);
/* allocated in paging_init and unchanged thereafter */
static unsigned long *empty_bad_page = NULL;
/* /*
* Initialized during boot, and readonly for initializing page tables * Initialized during boot, and readonly for initializing page tables
...@@ -146,7 +144,6 @@ void __init paging_init(void) ...@@ -146,7 +144,6 @@ void __init paging_init(void)
int i; int i;
empty_zero_page = (unsigned long *) alloc_bootmem_low_pages(PAGE_SIZE); empty_zero_page = (unsigned long *) alloc_bootmem_low_pages(PAGE_SIZE);
empty_bad_page = (unsigned long *) alloc_bootmem_low_pages(PAGE_SIZE);
for (i = 0; i < ARRAY_SIZE(zones_size); i++) for (i = 0; i < ARRAY_SIZE(zones_size); i++)
zones_size[i] = 0; zones_size[i] = 0;
......
...@@ -18,7 +18,7 @@ ...@@ -18,7 +18,7 @@
* Various page->flags bits: * Various page->flags bits:
* *
* PG_reserved is set for special pages, which can never be swapped out. Some * PG_reserved is set for special pages, which can never be swapped out. Some
* of them might not even exist (eg empty_bad_page)... * of them might not even exist...
* *
* The PG_private bitflag is set on pagecache pages if they contain filesystem * The PG_private bitflag is set on pagecache pages if they contain filesystem
* specific data (which is normally at page->private). It can be used by * specific data (which is normally at page->private). It can be used by
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment