nexedi / linux · Commits

Commit f691fe1d, authored Jul 06, 2009 by Avi Kivity
KVM: Trace shadow page lifecycle

Create, sync, unsync, zap.

Signed-off-by: Avi Kivity <avi@redhat.com>

parent 9c1b96e3
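The four tracepoints this commit adds (kvm_mmu_get_page, kvm_mmu_sync_page, kvm_mmu_unsync_page, kvm_mmu_zap_page) are ordinary ftrace events in a new kvmmmu subsystem, so they can be enabled and read through the standard tracing debugfs interface with no KVM-specific tooling. A minimal userspace sketch, assuming debugfs is mounted at the conventional /sys/kernel/debug (adjust the path and run as root if your setup differs); write_file is a local helper, not part of this commit:

        /* enable the kvmmmu events and stream formatted trace lines */
        #include <stdio.h>
        #include <stdlib.h>

        #define TRACING "/sys/kernel/debug/tracing"

        static void write_file(const char *path, const char *val)
        {
                FILE *f = fopen(path, "w");

                if (!f) {
                        perror(path);
                        exit(1);
                }
                fputs(val, f);
                fclose(f);
        }

        int main(void)
        {
                char line[512];
                FILE *pipe;

                /* one switch covers all events in the subsystem:
                 * get (create/existing), sync, unsync, zap */
                write_file(TRACING "/events/kvmmmu/enable", "1");

                /* each line is rendered with the event's TP_printk format */
                pipe = fopen(TRACING "/trace_pipe", "r");
                if (!pipe) {
                        perror("trace_pipe");
                        return 1;
                }
                while (fgets(line, sizeof(line), pipe))
                        fputs(line, stdout);
                return 0;
        }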
Showing 2 changed files with 109 additions and 4 deletions (+109 -4)
arch/x86/kvm/mmu.c       +6   -4
arch/x86/kvm/mmutrace.h  +103 -0
arch/x86/kvm/mmu.c
@@ -1122,6 +1122,7 @@ static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
                 return 1;
         }
 
+        trace_kvm_mmu_sync_page(sp);
         if (rmap_write_protect(vcpu->kvm, sp->gfn))
                 kvm_flush_remote_tlbs(vcpu->kvm);
         kvm_unlink_unsync_page(vcpu->kvm, sp);
@@ -1244,8 +1245,6 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
                 quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
                 role.quadrant = quadrant;
         }
-        pgprintk("%s: looking gfn %lx role %x\n", __func__,
-                 gfn, role.word);
         index = kvm_page_table_hashfn(gfn);
         bucket = &vcpu->kvm->arch.mmu_page_hash[index];
         hlist_for_each_entry_safe(sp, node, tmp, bucket, hash_link)
@@ -1262,14 +1261,13 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
                         set_bit(KVM_REQ_MMU_SYNC, &vcpu->requests);
                         kvm_mmu_mark_parents_unsync(vcpu, sp);
                 }
-                pgprintk("%s: found\n", __func__);
+                trace_kvm_mmu_get_page(sp, false);
                 return sp;
         }
         ++vcpu->kvm->stat.mmu_cache_miss;
         sp = kvm_mmu_alloc_page(vcpu, parent_pte);
         if (!sp)
                 return sp;
-        pgprintk("%s: adding gfn %lx role %x\n", __func__, gfn, role.word);
         sp->gfn = gfn;
         sp->role = role;
         hlist_add_head(&sp->hash_link, bucket);
@@ -1282,6 +1280,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
                 vcpu->arch.mmu.prefetch_page(vcpu, sp);
         else
                 nonpaging_prefetch_page(vcpu, sp);
+        trace_kvm_mmu_get_page(sp, true);
         return sp;
 }
 
@@ -1410,6 +1409,8 @@ static int mmu_zap_unsync_children(struct kvm *kvm,
 static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
 {
         int ret;
+
+        trace_kvm_mmu_zap_page(sp);
         ++kvm->stat.mmu_shadow_zapped;
         ret = mmu_zap_unsync_children(kvm, sp);
         kvm_mmu_page_unlink_children(kvm, sp);
@@ -1656,6 +1657,7 @@ static int kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
         struct kvm_mmu_page *s;
         struct hlist_node *node, *n;
 
+        trace_kvm_mmu_unsync_page(sp);
         index = kvm_page_table_hashfn(sp->gfn);
         bucket = &vcpu->kvm->arch.mmu_page_hash[index];
         /* don't unsync if pagetable is shadowed with multiple roles */
arch/x86/kvm/mmutrace.h
@@ -2,12 +2,48 @@
 #define _TRACE_KVMMMU_H
 
 #include <linux/tracepoint.h>
 #include <linux/ftrace_event.h>
 
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM kvmmmu
 #define TRACE_INCLUDE_PATH .
 #define TRACE_INCLUDE_FILE mmutrace
 
+#define KVM_MMU_PAGE_FIELDS \
+        __field(__u64, gfn) \
+        __field(__u32, role) \
+        __field(__u32, root_count) \
+        __field(__u32, unsync)
+
+#define KVM_MMU_PAGE_ASSIGN(sp) \
+        __entry->gfn = sp->gfn; \
+        __entry->role = sp->role.word; \
+        __entry->root_count = sp->root_count; \
+        __entry->unsync = sp->unsync;
+
+#define KVM_MMU_PAGE_PRINTK() ({ \
+        const char *ret = p->buffer + p->len; \
+        static const char *access_str[] = { \
+                "---", "--x", "w--", "w-x", "-u-", "-ux", "wu-", "wux" \
+        }; \
+        union kvm_mmu_page_role role; \
+ \
+        role.word = __entry->role; \
+ \
+        trace_seq_printf(p, "sp gfn %llx %u/%u q%u%s %s%s %spge" \
+                         " %snxe root %u %s%c", \
+                         __entry->gfn, role.level, role.glevels, \
+                         role.quadrant, \
+                         role.direct ? " direct" : "", \
+                         access_str[role.access], \
+                         role.invalid ? " invalid" : "", \
+                         role.cr4_pge ? "" : "!", \
+                         role.nxe ? "" : "!", \
+                         __entry->root_count, \
+                         __entry->unsync ? "unsync" : "sync", 0); \
+        ret; \
+})
+
 #define kvm_mmu_trace_pferr_flags \
         { PFERR_PRESENT_MASK, "P" }, \
         { PFERR_WRITE_MASK, "W" }, \
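KVM_MMU_PAGE_PRINTK() is the subtle piece of this hunk: it runs inside TP_printk's argument list, appends the decoded page description to the trace_seq buffer p as a side effect, and, being a GCC statement expression, evaluates to a pointer at the position in p->buffer where that text begins, so the enclosing "%s" prints exactly what was just appended. A minimal userspace sketch of the same trick, with a hypothetical seq_buf type standing in for the kernel's trace_seq:

        #include <stdio.h>

        /* stand-in (assumption) for struct trace_seq: buffer plus length */
        struct seq_buf {
                char buffer[256];
                int len;
        };

        /* append formatted text; the statement expression evaluates to a
         * pointer to where the newly appended text starts in the buffer */
        #define SEQ_APPEND(p, ...) ({ \
                const char *ret = (p)->buffer + (p)->len; \
                (p)->len += snprintf((p)->buffer + (p)->len, \
                                     sizeof((p)->buffer) - (p)->len, \
                                     __VA_ARGS__); \
                ret; \
        })

        int main(void)
        {
                struct seq_buf s = { .buffer = "", .len = 0 };

                /* the outer %s sees only the freshly appended text */
                printf("%s\n", SEQ_APPEND(&s, "sp gfn %llx root %u",
                                          0x1234ULL, 1));
                return 0;
        }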
@@ -111,6 +147,73 @@ TRACE_EVENT(
                   __print_flags(__entry->pferr, "|", kvm_mmu_trace_pferr_flags))
 );
 
+TRACE_EVENT(
+        kvm_mmu_get_page,
+        TP_PROTO(struct kvm_mmu_page *sp, bool created),
+        TP_ARGS(sp, created),
+
+        TP_STRUCT__entry(
+                KVM_MMU_PAGE_FIELDS
+                __field(bool, created)
+                ),
+
+        TP_fast_assign(
+                KVM_MMU_PAGE_ASSIGN(sp)
+                __entry->created = created;
+                ),
+
+        TP_printk("%s %s", KVM_MMU_PAGE_PRINTK(),
+                  __entry->created ? "new" : "existing")
+);
+
+TRACE_EVENT(
+        kvm_mmu_sync_page,
+        TP_PROTO(struct kvm_mmu_page *sp),
+        TP_ARGS(sp),
+
+        TP_STRUCT__entry(
+                KVM_MMU_PAGE_FIELDS
+                ),
+
+        TP_fast_assign(
+                KVM_MMU_PAGE_ASSIGN(sp)
+                ),
+
+        TP_printk("%s", KVM_MMU_PAGE_PRINTK())
+);
+
+TRACE_EVENT(
+        kvm_mmu_unsync_page,
+        TP_PROTO(struct kvm_mmu_page *sp),
+        TP_ARGS(sp),
+
+        TP_STRUCT__entry(
+                KVM_MMU_PAGE_FIELDS
+                ),
+
+        TP_fast_assign(
+                KVM_MMU_PAGE_ASSIGN(sp)
+                ),
+
+        TP_printk("%s", KVM_MMU_PAGE_PRINTK())
+);
+
+TRACE_EVENT(
+        kvm_mmu_zap_page,
+        TP_PROTO(struct kvm_mmu_page *sp),
+        TP_ARGS(sp),
+
+        TP_STRUCT__entry(
+                KVM_MMU_PAGE_FIELDS
+                ),
+
+        TP_fast_assign(
+                KVM_MMU_PAGE_ASSIGN(sp)
+                ),
+
+        TP_printk("%s", KVM_MMU_PAGE_PRINTK())
+);
+
 #endif /* _TRACE_KVMMMU_H */
 
 /* This part must be outside protection */
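For a sense of what the KVM_MMU_PAGE_PRINTK() format produces, here is a small userspace rendering of the same format string with made-up field values (the role bit names follow the diff above; every value below is illustrative only, not captured output):

        #include <stdio.h>

        int main(void)
        {
                static const char *access_str[] = {
                        "---", "--x", "w--", "w-x", "-u-", "-ux", "wu-", "wux"
                };
                /* illustrative values only */
                unsigned long long gfn = 0x1200;
                unsigned int level = 3, glevels = 3, quadrant = 0, access = 7;
                unsigned int root_count = 0;
                int direct = 0, invalid = 0, cr4_pge = 0, nxe = 0, unsync = 0;

                /* mirrors the TP_printk format of kvm_mmu_get_page:
                 * prints "sp gfn 1200 3/3 q0 wux !pge !nxe root 0 sync new" */
                printf("sp gfn %llx %u/%u q%u%s %s%s %spge %snxe root %u %s %s\n",
                       gfn, level, glevels, quadrant,
                       direct ? " direct" : "",
                       access_str[access],
                       invalid ? " invalid" : "",
                       cr4_pge ? "" : "!",
                       nxe ? "" : "!",
                       root_count,
                       unsync ? "unsync" : "sync",
                       "new");
                return 0;
        }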