Commit 4c13629f, authored Dec 01, 2010 by Jeremy Fitzhardinge

    xen: make a pile of mmu pvop functions static

    Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>

Parent: ef691947

Showing 2 changed files with 23 additions and 60 deletions:

    arch/x86/xen/mmu.c    +23  -23
    arch/x86/xen/mmu.h     +0  -37
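Context for reading the diff (not part of the commit message): these are paravirt-op (pvop) callbacks, so nothing calls them by name from outside arch/x86/xen/mmu.c; they are reached only through a function-pointer table in the same file. That is what lets the prototypes in mmu.h be deleted and the definitions gain static. A minimal, self-contained sketch of the pattern, using hypothetical names and plain userspace C rather than kernel code:

/*
 * sketch.c - minimal illustration (hypothetical names, not kernel code)
 * of why an ops-table callback can be made static: the function-pointer
 * table in the same file is its only user, so no header prototype and
 * no external linkage are needed.
 */
#include <stdio.h>

struct mmu_ops {
	void (*set_entry)(unsigned long *slot, unsigned long val);
};

/* static: nothing outside this translation unit refers to it by name */
static void demo_set_entry(unsigned long *slot, unsigned long val)
{
	*slot = val;
	printf("entry set to %#lx\n", val);
}

/* the table is the sole reference that keeps demo_set_entry alive */
static const struct mmu_ops demo_ops = {
	.set_entry = demo_set_entry,
};

int main(void)
{
	unsigned long slot = 0;

	demo_ops.set_entry(&slot, 0x42);	/* callers dispatch through the table */
	return 0;
}

Making the callback static keeps the symbol out of the global namespace and lets the compiler warn if the table ever stops referencing it.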
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -283,7 +283,7 @@ static void xen_extend_mmu_update(const struct mmu_update *update)
 	*u = *update;
 }
 
-void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
+static void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
 {
 	struct mmu_update u;
@@ -303,7 +303,7 @@ void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
 	preempt_enable();
 }
 
-void xen_set_pmd(pmd_t *ptr, pmd_t val)
+static void xen_set_pmd(pmd_t *ptr, pmd_t val)
 {
 	ADD_STATS(pmd_update, 1);
@@ -346,7 +346,7 @@ static bool xen_batched_set_pte(pte_t *ptep, pte_t pteval)
 	return true;
 }
 
-void xen_set_pte(pte_t *ptep, pte_t pteval)
+static void xen_set_pte(pte_t *ptep, pte_t pteval)
 {
 	ADD_STATS(pte_update, 1);
 //	ADD_STATS(pte_update_pinned, xen_page_pinned(ptep));
@@ -355,7 +355,7 @@ void xen_set_pte(pte_t *ptep, pte_t pteval)
 	native_set_pte(ptep, pteval);
 }
 
-void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
+static void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
 		    pte_t *ptep, pte_t pteval)
 {
 	xen_set_pte(ptep, pteval);
@@ -449,7 +449,7 @@ static pteval_t iomap_pte(pteval_t val)
 	return val;
 }
 
-pteval_t xen_pte_val(pte_t pte)
+static pteval_t xen_pte_val(pte_t pte)
 {
 	pteval_t pteval = pte.pte;
@@ -466,7 +466,7 @@ pteval_t xen_pte_val(pte_t pte)
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val);
 
-pgdval_t xen_pgd_val(pgd_t pgd)
+static pgdval_t xen_pgd_val(pgd_t pgd)
 {
 	return pte_mfn_to_pfn(pgd.pgd);
 }
@@ -497,7 +497,7 @@ void xen_set_pat(u64 pat)
 	WARN_ON(pat != 0x0007010600070106ull);
 }
 
-pte_t xen_make_pte(pteval_t pte)
+static pte_t xen_make_pte(pteval_t pte)
 {
 	phys_addr_t addr = (pte & PTE_PFN_MASK);
@@ -567,20 +567,20 @@ pte_t xen_make_pte_debug(pteval_t pte)
 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte_debug);
 #endif
 
-pgd_t xen_make_pgd(pgdval_t pgd)
+static pgd_t xen_make_pgd(pgdval_t pgd)
 {
 	pgd = pte_pfn_to_mfn(pgd);
 	return native_make_pgd(pgd);
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pgd);
 
-pmdval_t xen_pmd_val(pmd_t pmd)
+static pmdval_t xen_pmd_val(pmd_t pmd)
 {
 	return pte_mfn_to_pfn(pmd.pmd);
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_pmd_val);
 
-void xen_set_pud_hyper(pud_t *ptr, pud_t val)
+static void xen_set_pud_hyper(pud_t *ptr, pud_t val)
 {
 	struct mmu_update u;
@@ -600,7 +600,7 @@ void xen_set_pud_hyper(pud_t *ptr, pud_t val)
 	preempt_enable();
 }
 
-void xen_set_pud(pud_t *ptr, pud_t val)
+static void xen_set_pud(pud_t *ptr, pud_t val)
 {
 	ADD_STATS(pud_update, 1);
@@ -617,24 +617,24 @@ void xen_set_pud(pud_t *ptr, pud_t val)
 }
 
 #ifdef CONFIG_X86_PAE
-void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
+static void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
 {
 	set_64bit((u64 *)ptep, native_pte_val(pte));
 }
 
-void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+static void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
 	if (!xen_batched_set_pte(ptep, native_make_pte(0)))
 		native_pte_clear(mm, addr, ptep);
 }
 
-void xen_pmd_clear(pmd_t *pmdp)
+static void xen_pmd_clear(pmd_t *pmdp)
 {
 	set_pmd(pmdp, __pmd(0));
 }
 #endif	/* CONFIG_X86_PAE */
 
-pmd_t xen_make_pmd(pmdval_t pmd)
+static pmd_t xen_make_pmd(pmdval_t pmd)
 {
 	pmd = pte_pfn_to_mfn(pmd);
 	return native_make_pmd(pmd);
@@ -642,13 +642,13 @@ pmd_t xen_make_pmd(pmdval_t pmd)
 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd);
 
 #if PAGETABLE_LEVELS == 4
-pudval_t xen_pud_val(pud_t pud)
+static pudval_t xen_pud_val(pud_t pud)
 {
 	return pte_mfn_to_pfn(pud.pud);
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_pud_val);
 
-pud_t xen_make_pud(pudval_t pud)
+static pud_t xen_make_pud(pudval_t pud)
 {
 	pud = pte_pfn_to_mfn(pud);
@@ -656,7 +656,7 @@ pud_t xen_make_pud(pudval_t pud)
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pud);
 
-pgd_t *xen_get_user_pgd(pgd_t *pgd)
+static pgd_t *xen_get_user_pgd(pgd_t *pgd)
 {
 	pgd_t *pgd_page = (pgd_t *)(((unsigned long)pgd) & PAGE_MASK);
 	unsigned offset = pgd - pgd_page;
@@ -688,7 +688,7 @@ static void __xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
  *  2. It is always pinned
  *  3. It has no user pagetable attached to it
  */
-void __init xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
+static void __init xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
 {
 	preempt_disable();
@@ -701,7 +701,7 @@ void __init xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
 	preempt_enable();
 }
 
-void xen_set_pgd(pgd_t *ptr, pgd_t val)
+static void xen_set_pgd(pgd_t *ptr, pgd_t val)
 {
 	pgd_t *user_ptr = xen_get_user_pgd(ptr);
@@ -1122,14 +1122,14 @@ void xen_mm_unpin_all(void)
 	spin_unlock(&pgd_lock);
 }
 
-void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next)
+static void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next)
 {
 	spin_lock(&next->page_table_lock);
 	xen_pgd_pin(next);
 	spin_unlock(&next->page_table_lock);
 }
 
-void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
+static void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
 {
 	spin_lock(&mm->page_table_lock);
 	xen_pgd_pin(mm);
@@ -1216,7 +1216,7 @@ static void xen_drop_mm_ref(struct mm_struct *mm)
  * pagetable because of lazy tlb flushing.  This means we need need to
  * switch all CPUs off this pagetable before we can unpin it.
  */
-void xen_exit_mmap(struct mm_struct *mm)
+static void xen_exit_mmap(struct mm_struct *mm)
 {
 	get_cpu();		/* make sure we don't move around */
 	xen_drop_mm_ref(mm);
diff --git a/arch/x86/xen/mmu.h b/arch/x86/xen/mmu.h
--- a/arch/x86/xen/mmu.h
+++ b/arch/x86/xen/mmu.h
@@ -15,43 +15,6 @@ bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
 void set_pte_mfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags);
 
-void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next);
-void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm);
-void xen_exit_mmap(struct mm_struct *mm);
-
-pteval_t xen_pte_val(pte_t);
-pmdval_t xen_pmd_val(pmd_t);
-pgdval_t xen_pgd_val(pgd_t);
-
-pte_t xen_make_pte(pteval_t);
-pmd_t xen_make_pmd(pmdval_t);
-pgd_t xen_make_pgd(pgdval_t);
-
-void xen_set_pte(pte_t *ptep, pte_t pteval);
-void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
-		    pte_t *ptep, pte_t pteval);
-
-#ifdef CONFIG_X86_PAE
-void xen_set_pte_atomic(pte_t *ptep, pte_t pte);
-void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
-void xen_pmd_clear(pmd_t *pmdp);
-#endif	/* CONFIG_X86_PAE */
-
-void xen_set_pmd(pmd_t *pmdp, pmd_t pmdval);
-void xen_set_pud(pud_t *ptr, pud_t val);
-void xen_set_pmd_hyper(pmd_t *pmdp, pmd_t pmdval);
-void xen_set_pud_hyper(pud_t *ptr, pud_t val);
-
-#if PAGETABLE_LEVELS == 4
-pudval_t xen_pud_val(pud_t pud);
-pud_t xen_make_pud(pudval_t pudval);
-void xen_set_pgd(pgd_t *pgdp, pgd_t pgd);
-void xen_set_pgd_hyper(pgd_t *pgdp, pgd_t pgd);
-#endif
-
-pgd_t *xen_get_user_pgd(pgd_t *pgd);
-
 pte_t xen_ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
 void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
 				 pte_t *ptep, pte_t pte);