Commit 82db338f authored by Paolo Bonzini, committed by Stefan Bader

KVM: x86: add tracepoints around __direct_map and FNAME(fetch)

These are useful in debugging shadow paging.
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>

CVE-2018-12207

(backported from commit 335e192a)
[tyhicks: Backport to 4.4
 - Continue to use pfn_t instead of kvm_pfn_t
 - Remove the use of shadow_present_mask in the kvm_mmu_set_spte trace
   point since we don't have commit ffb128c8 ("kvm: mmu: don't set
   the present bit unconditionally")
 - Open code is_executable_pte() in the kvm_mmu_set_spte() trace point
   since that function doesn't exist]
Signed-off-by: Tyler Hicks <tyhicks@canonical.com>
Signed-off-by: Stefan Bader <stefan.bader@canonical.com>
parent ede7288e
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -129,9 +129,6 @@ module_param(dbg, bool, 0644);
 
 #include <trace/events/kvm.h>
 
-#define CREATE_TRACE_POINTS
-#include "mmutrace.h"
-
 #define SPTE_HOST_WRITEABLE	(1ULL << PT_FIRST_AVAIL_BITS_SHIFT)
 #define SPTE_MMU_WRITEABLE	(1ULL << (PT_FIRST_AVAIL_BITS_SHIFT + 1))
 
@@ -192,6 +189,10 @@ static u64 __read_mostly shadow_mmio_mask;
 static void mmu_spte_set(u64 *sptep, u64 spte);
 static void mmu_free_roots(struct kvm_vcpu *vcpu);
 
+#define CREATE_TRACE_POINTS
+#include "mmutrace.h"
+
+
 void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask)
 {
 	shadow_mmio_mask = mmio_mask;
@@ -2635,10 +2636,7 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, unsigned pte_access,
 		ret = RET_PF_EMULATE;
 
 	pgprintk("%s: setting spte %llx\n", __func__, *sptep);
-	pgprintk("instantiating %s PTE (%s) at %llx (%llx) addr %p\n",
-		 is_large_pte(*sptep)? "2MB" : "4kB",
-		 *sptep & PT_PRESENT_MASK ?"RW":"R", gfn,
-		 *sptep, sptep);
+	trace_kvm_mmu_set_spte(level, gfn, sptep);
 	if (!was_rmapped && is_large_pte(*sptep))
 		++vcpu->kvm->stat.lpages;
 
@@ -2749,6 +2747,7 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t gpa, int write,
 	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
 		return RET_PF_RETRY;
 
+	trace_kvm_mmu_spte_requested(gpa, level, pfn);
 	for_each_shadow_entry(vcpu, gpa, it) {
 		base_gfn = gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);
 		if (it.level == level)
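Note that the first two mmu.c hunks move the CREATE_TRACE_POINTS block below the shadow_*_mask declarations: with CREATE_TRACE_POINTS defined, the event bodies in mmutrace.h are compiled into mmu.c, and the new kvm_mmu_set_spte event references shadow_x_mask, shadow_nx_mask and shadow_user_mask, which must therefore be declared first.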
--- a/arch/x86/kvm/mmutrace.h
+++ b/arch/x86/kvm/mmutrace.h
@@ -322,6 +322,65 @@ TRACE_EVENT(
 		__entry->kvm_gen == __entry->spte_gen
 	)
 );
+
+TRACE_EVENT(
+	kvm_mmu_set_spte,
+	TP_PROTO(int level, gfn_t gfn, u64 *sptep),
+	TP_ARGS(level, gfn, sptep),
+
+	TP_STRUCT__entry(
+		__field(u64, gfn)
+		__field(u64, spte)
+		__field(u64, sptep)
+		__field(u8, level)
+		/* These depend on page entry type, so compute them now. */
+		__field(bool, r)
+		__field(bool, x)
+		__field(u8, u)
+	),
+
+	TP_fast_assign(
+		__entry->gfn = gfn;
+		__entry->spte = *sptep;
+		__entry->sptep = virt_to_phys(sptep);
+		__entry->level = level;
+		__entry->r = __entry->spte & PT_PRESENT_MASK;
+		__entry->x = (__entry->spte & (shadow_x_mask | shadow_nx_mask)) == shadow_x_mask;
+		__entry->u = shadow_user_mask ? !!(__entry->spte & shadow_user_mask) : -1;
+	),
+
+	TP_printk("gfn %llx spte %llx (%s%s%s%s) level %d at %llx",
+		  __entry->gfn, __entry->spte,
+		  __entry->r ? "r" : "-",
+		  __entry->spte & PT_WRITABLE_MASK ? "w" : "-",
+		  __entry->x ? "x" : "-",
+		  __entry->u == -1 ? "" : (__entry->u ? "u" : "-"),
+		  __entry->level, __entry->sptep
+	)
+);
+
+TRACE_EVENT(
+	kvm_mmu_spte_requested,
+	TP_PROTO(gpa_t addr, int level, pfn_t pfn),
+	TP_ARGS(addr, level, pfn),
+
+	TP_STRUCT__entry(
+		__field(u64, gfn)
+		__field(u64, pfn)
+		__field(u8, level)
+	),
+
+	TP_fast_assign(
+		__entry->gfn = addr >> PAGE_SHIFT;
+		__entry->pfn = pfn | (__entry->gfn & (KVM_PAGES_PER_HPAGE(level) - 1));
+		__entry->level = level;
+	),
+
+	TP_printk("gfn %llx pfn %llx level %d",
+		  __entry->gfn, __entry->pfn,
+		  __entry->level
+	)
+);
 
 #endif /* _TRACE_KVMMMU_H */
 
 #undef TRACE_INCLUDE_PATH
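As the backport note in the commit message says, the x bit in the kvm_mmu_set_spte event open codes is_executable_pte(), which does not exist in 4.4. For reference, the upstream helper in arch/x86/kvm/mmu.c is the following one-liner; the expression in the trace point above is equivalent:

	static bool is_executable_pte(u64 spte)
	{
		return (spte & (shadow_x_mask | shadow_nx_mask)) == shadow_x_mask;
	}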
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -605,6 +605,8 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 
 	base_gfn = gw->gfn;
 
+	trace_kvm_mmu_spte_requested(addr, gw->level, pfn);
+
 	for (; shadow_walk_okay(&it); shadow_walk_next(&it)) {
 		clear_sp_write_flooding_count(it.sptep);
 		base_gfn = gw->gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);
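At both call sites (__direct_map and FNAME(fetch)), kvm_mmu_spte_requested logs the frame actually requested: its TP_fast_assign folds the gfn's offset within a hugepage into the base pfn, so the printed pfn identifies the exact 4KiB frame even for a large mapping. A minimal userspace sketch of that arithmetic, using hypothetical values:

	#include <inttypes.h>
	#include <stdint.h>
	#include <stdio.h>

	/*
	 * Mirrors __entry->pfn = pfn | (gfn & (KVM_PAGES_PER_HPAGE(level) - 1))
	 * from kvm_mmu_spte_requested, for a 2MiB mapping (level 2), where
	 * KVM_PAGES_PER_HPAGE(2) == 512 on x86.
	 */
	int main(void)
	{
		uint64_t pages_per_hpage = 512;	/* 2MiB / 4KiB */
		uint64_t base_pfn = 0x100000;	/* hugepage-aligned host pfn (hypothetical) */
		uint64_t gfn = 0x7e613;		/* faulting guest frame number (hypothetical) */
		uint64_t pfn = base_pfn | (gfn & (pages_per_hpage - 1));

		/* prints: gfn 7e613 pfn 100013 level 2 */
		printf("gfn %" PRIx64 " pfn %" PRIx64 " level 2\n", gfn, pfn);
		return 0;
	}

Both events belong to the kvmmmu trace system defined by mmutrace.h, so once the patch is applied they can be enabled through the normal tracing event interface alongside the existing kvmmmu trace points.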