Commit c56208f6, authored Feb 22, 2002 by Anton Blanchard

ppc64: update for pte in highmem changes

parent 79b65813
Showing 7 changed files with 71 additions and 120 deletions (+71 -120).
arch/ppc64/kernel/htab.c	+1 -1
arch/ppc64/kernel/idle.c	+2 -6
arch/ppc64/mm/init.c	+3 -21
include/asm-ppc64/Paca.h	+4 -4
include/asm-ppc64/page.h	+7 -0
include/asm-ppc64/pgalloc.h	+47 -81
include/asm-ppc64/pgtable.h	+7 -7
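The common thread in these changes is the generic VM's new pte-in-highmem interface: pte_offset() is split into pte_offset_kernel() for kernel page tables and pte_offset_map()/pte_unmap() for user page tables, pte_alloc() becomes pte_alloc_kernel(), and the per-CPU page-table quicklists (along with do_check_pgt_cache()) go away. As a minimal sketch of the kernel-side accessor, not part of this commit, here is a three-level walk in the style of find_linux_pte(); the helper name walk_kernel_pte() is hypothetical:

/*
 * Sketch only: assumes 2.5-era three-level ppc64 paging with
 * <asm/pgtable.h> included; walk_kernel_pte() is a hypothetical
 * name used purely for illustration.
 */
static pte_t *walk_kernel_pte(unsigned long ea)
{
	pgd_t *pg = pgd_offset_k(ea);	/* kernel/vmalloc page directory */
	pmd_t *pm;

	if (pgd_none(*pg))
		return NULL;

	pm = pmd_offset(pg, ea);
	if (pmd_none(*pm))
		return NULL;

	/* Kernel page tables are never in highmem, so no pte_unmap() pairing. */
	return pte_offset_kernel(pm, ea);
}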
arch/ppc64/kernel/htab.c

@@ -639,7 +639,7 @@ pte_t * find_linux_pte( pgd_t * pgdir, unsigned long ea )
 		pm = pmd_offset(pg, ea);
 		if (!pmd_none(*pm)) {
-			pt = pte_offset(pm, ea);
+			pt = pte_offset_kernel(pm, ea);
 			pte = *pt;
 			if (!pte_present(pte))
 				pt = NULL;
arch/ppc64/kernel/idle.c

@@ -91,10 +91,8 @@ int idled(void)
 	paca = (struct Paca *)mfspr(SPRG3);

 	while (1) {
-		if (need_resched()) {
+		if (need_resched())
 			schedule();
-			check_pgt_cache();
-		}
 	}

 	for (;;) {

@@ -122,10 +120,8 @@ int idled(void)
 		}
 	}
 	HMT_medium();
-	if (need_resched()) {
+	if (need_resched())
 		schedule();
-		check_pgt_cache();
-	}
 	}

 	return 0;
 }
arch/ppc64/mm/init.c

@@ -113,23 +113,6 @@ unsigned long __max_memory;
  */
 mmu_gather_t     mmu_gathers[NR_CPUS];

-int do_check_pgt_cache(int low, int high)
-{
-	int freed = 0;
-
-	if (pgtable_cache_size > high) {
-		do {
-			if (pgd_quicklist)
-				free_page((unsigned long)pgd_alloc_one_fast(0)), ++freed;
-			if (pmd_quicklist)
-				free_page((unsigned long)pmd_alloc_one_fast(0, 0)), ++freed;
-			if (pte_quicklist)
-				free_page((unsigned long)pte_alloc_one_fast(0, 0)), ++freed;
-		} while (pgtable_cache_size > low);
-	}
-
-	return freed;
-}
-
 void show_mem(void)
 {
 	int i, free = 0, total = 0, reserved = 0;

@@ -155,7 +138,6 @@ void show_mem(void)
 	printk("%d reserved pages\n", reserved);
 	printk("%d pages shared\n", shared);
 	printk("%d pages swap cached\n", cached);
-	printk("%d pages in page table cache\n", (int)pgtable_cache_size);
 	show_buffers();
 }

@@ -260,7 +242,7 @@ static void map_io_page(unsigned long ea, unsigned long pa, int flags)
 	spin_lock(&ioremap_mm.page_table_lock);

 	pgdp = pgd_offset_i(ea);
 	pmdp = pmd_alloc(&ioremap_mm, pgdp, ea);
-	ptep = pte_alloc(&ioremap_mm, pmdp, ea);
+	ptep = pte_alloc_kernel(&ioremap_mm, pmdp, ea);

 	pa = absolute_to_phys(pa);
 	set_pte(ptep, mk_pte_phys(pa & PAGE_MASK, __pgprot(flags)));

@@ -336,7 +318,7 @@ local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
 	if (!pgd_none(*pgd)) {
 		pmd = pmd_offset(pgd, vmaddr);
 		if (!pmd_none(*pmd)) {
-			ptep = pte_offset(pmd, vmaddr);
+			ptep = pte_offset_kernel(pmd, vmaddr);
 			/* Check if HPTE might exist and flush it if so */
 			pte = __pte(pte_update(ptep, _PAGE_HPTEFLAGS, 0));
 			if (pte_val(pte) & _PAGE_HASHPTE) {

@@ -391,7 +373,7 @@ local_flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long e
 		if (pmd_end > end)
 			pmd_end = end;
 		if (!pmd_none(*pmd)) {
-			ptep = pte_offset(pmd, start);
+			ptep = pte_offset_kernel(pmd, start);
 			do {
 				if (pte_val(*ptep) & _PAGE_HASHPTE) {
 					pte = __pte(pte_update(ptep, _PAGE_HPTEFLAGS, 0));
include/asm-ppc64/Paca.h

@@ -92,10 +92,10 @@ struct Paca
	 * CACHE_LINE_2  0x0080 - 0x00FF
	 *=====================================================================================
	 */
-	u64	*pgd_cache;		/* 0x00 */
-	u64	*pmd_cache;		/* 0x08 */
-	u64	*pte_cache;		/* 0x10 */
-	u64	pgtable_cache_sz;	/* 0x18 */
+	u64	spare1;			/* 0x00 */
+	u64	spare2;			/* 0x08 */
+	u64	spare3;			/* 0x10 */
+	u64	spare4;			/* 0x18 */
 	u64	next_jiffy_update_tb;	/* TB value for next jiffy update  0x20 */
 	u32	lpEvent_count;		/* lpEvents processed  0x28 */
 	u32	prof_multiplier;	/* 0x2C */
include/asm-ppc64/page.h

@@ -130,6 +130,13 @@ extern void xmon(struct pt_regs *excp);
 #define PAGE_BUG(page) do { BUG(); } while (0)

+/*
+ * XXX A bug in the current ppc64 compiler prevents an optimisation
+ * where a divide is replaced by a multiply by shifted inverse. For
+ * the moment use page->virtaul
+ */
+#define WANT_PAGE_VIRTUAL 1
+
 /* Pure 2^n version of get_order */
 extern __inline__ int get_order(unsigned long size)
 {
include/asm-ppc64/pgalloc.h

@@ -12,116 +12,82 @@
  * as published by the Free Software Foundation; either version
  * 2 of the License, or (at your option) any later version.
  */

-#define quicklists	get_paca()
-
-#define pgd_quicklist		(quicklists->pgd_cache)
-#define pmd_quicklist		(quicklists->pmd_cache)
-#define pte_quicklist		(quicklists->pte_cache)
-#define pgtable_cache_size	(quicklists->pgtable_cache_sz)
-
-static inline pgd_t *
-pgd_alloc_one_fast(struct mm_struct *mm)
-{
-	unsigned long *ret = pgd_quicklist;
-
-	if (ret != NULL) {
-		pgd_quicklist = (unsigned long *)(*ret);
-		ret[0] = 0;
-		--pgtable_cache_size;
-	} else
-		ret = NULL;
-
-	return (pgd_t *)ret;
-}
-
 static inline pgd_t *
 pgd_alloc(struct mm_struct *mm)
 {
-	/* the VM system never calls pgd_alloc_one_fast(), so we do it here. */
-	pgd_t *pgd = pgd_alloc_one_fast(mm);
-	if (pgd == NULL) {
-		pgd = (pgd_t *)__get_free_page(GFP_KERNEL);
-		if (pgd != NULL)
-			clear_page(pgd);
-	}
+	pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL);
+
+	if (pgd != NULL)
+		clear_page(pgd);
+
 	return pgd;
 }

 static inline void
 pgd_free(pgd_t *pgd)
 {
-	*(unsigned long *)pgd = (unsigned long)pgd_quicklist;
-	pgd_quicklist = (unsigned long *)pgd;
-	++pgtable_cache_size;
+	free_page((unsigned long)pgd);
 }

 #define pgd_populate(MM, PGD, PMD)	pgd_set(PGD, PMD)

-static inline pmd_t *
-pmd_alloc_one_fast(struct mm_struct *mm, unsigned long addr)
-{
-	unsigned long *ret = (unsigned long *)pmd_quicklist;
-
-	if (ret != NULL) {
-		pmd_quicklist = (unsigned long *)(*ret);
-		ret[0] = 0;
-		--pgtable_cache_size;
-	}
-	return (pmd_t *)ret;
-}
-
 static inline pmd_t *
 pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-	pmd_t *pmd;
-	int count = 0;
-
-	do {
-		pmd = (pmd_t *)__get_free_page(GFP_KERNEL);
-		if (pmd)
-			clear_page(pmd);
-		else {
-			current->state = TASK_UNINTERRUPTIBLE;
-			schedule_timeout(HZ);
-		}
-	} while (!pmd && (count++ < 10));
+	pmd_t *pmd = (pmd_t *)__get_free_page(GFP_KERNEL);
+
+	if (pmd != NULL)
+		clear_page(pmd);

 	return pmd;
 }

 static inline void
 pmd_free(pmd_t *pmd)
 {
-	*(unsigned long *)pmd = (unsigned long)pmd_quicklist;
-	pmd_quicklist = (unsigned long *)pmd;
-	++pgtable_cache_size;
+	free_page((unsigned long)pmd);
 }

-#define pmd_populate(MM, PMD, PTE)	pmd_set(PMD, PTE)
+#define pmd_populate(mm, pmd, pte)		pmd_set(pmd, pte)
+#define pmd_populate_kernel(mm, pmd, pte)	pmd_set(pmd, pte)

-static inline pte_t *
-pte_alloc_one_fast(struct mm_struct *mm, unsigned long addr)
-{
-	unsigned long *ret = (unsigned long *)pte_quicklist;
-
-	if (ret != NULL) {
-		pte_quicklist = (unsigned long *)(*ret);
-		ret[0] = 0;
-		--pgtable_cache_size;
-	}
-	return (pte_t *)ret;
-}
-
 static inline pte_t *
 pte_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-	pte_t *pte;
-	int count = 0;
-
-	do {
-		pte = (pte_t *)__get_free_page(GFP_KERNEL);
-		if (pte)
-			clear_page(pte);
-		else {
-			current->state = TASK_UNINTERRUPTIBLE;
-			schedule_timeout(HZ);
-		}
-	} while (!pte && (count++ < 10));
+	pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL);
+
+	if (pte != NULL)
+		clear_page(pte);

 	return pte;
 }

+#define pte_alloc_one_kernel(mm, address)	pte_alloc_one((mm), (address))
+
 static inline void
 pte_free(pte_t *pte)
 {
-	*(unsigned long *)pte = (unsigned long)pte_quicklist;
-	pte_quicklist = (unsigned long *)pte;
-	++pgtable_cache_size;
+	free_page((unsigned long)pte);
 }

-extern int do_check_pgt_cache(int, int);
+#define pte_free_kernel(pte)	pte_free(pte)

 #endif /* _PPC64_PGALLOC_H */
include/asm-ppc64/pgtable.h

@@ -202,6 +202,7 @@ extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
 #define	pmd_present(pmd)	((pmd_val(pmd)) != 0)
 #define	pmd_clear(pmdp)		(pmd_val(*(pmdp)) = 0)
 #define	pmd_page(pmd)		(__bpn_to_ba(pmd_val(pmd)))
+#define pmd_page_kernel(pmd)	pmd_page(pmd)
 #define	pgd_set(pgdp, pmdp)	(pgd_val(*(pgdp)) = (__ba_to_bpn(pmdp)))
 #define pgd_none(pgd)		(!pgd_val(pgd))
 #define	pgd_bad(pgd)		((pgd_val(pgd)) == 0)

@@ -222,9 +223,14 @@ extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
 	((pmd_t *) pgd_page(*(dir)) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)))

 /* Find an entry in the third-level page table.. */
-#define pte_offset(dir,addr) \
+#define pte_offset_kernel(dir,addr) \
 	((pte_t *) pmd_page(*(dir)) + (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))
+#define pte_offset_map(dir,addr)	pte_offset_kernel((dir), (addr))
+#define pte_offset_map_nested(dir,addr)	pte_offset_kernel((dir), (addr))
+#define pte_unmap(pte)			do { } while(0)
+#define pte_unmap_nested(pte)		do { } while(0)

 /* to find an entry in a kernel page-table-directory */
 /* This now only contains the vmalloc pages */
 #define pgd_offset_k(address) pgd_offset(&init_mm, address)

@@ -232,12 +238,6 @@ extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
 /* to find an entry in the ioremap page-table-directory */
 #define pgd_offset_i(address) (ioremap_pgd + pgd_index(address))

-/*
- * Given a pointer to an mem_map[] entry, return the kernel virtual
- * address corresponding to that page.
- */
-#define page_address(page) ((page)->virtual)
-
 #define pages_to_mb(x)		((x) >> (20-PAGE_SHIFT))

 /*
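On ppc64 nothing actually sits in highmem, which is why pte_offset_map() and pte_unmap() above collapse to pte_offset_kernel() and a no-op; callers in generic code still write the map/unmap pair so the same source works on architectures that do map their ptes high. A minimal sketch of that calling pattern, under the same assumptions as the earlier sketch (user_pte_present() is a hypothetical helper, not kernel code):

/*
 * Sketch only: checks whether a user address has a present pte, using
 * the map/unmap pairing expected by the pte-in-highmem interface.
 */
static int user_pte_present(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	pmd_t *pmd;
	pte_t *pte;
	int present;

	if (pgd_none(*pgd))
		return 0;

	pmd = pmd_offset(pgd, addr);
	if (pmd_none(*pmd))
		return 0;

	pte = pte_offset_map(pmd, addr);	/* no-op mapping on ppc64 */
	present = pte_present(*pte);
	pte_unmap(pte);				/* no-op unmapping on ppc64 */

	return present;
}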