Commit 1346bbb6 authored by David Matlack, committed by Paolo Bonzini

KVM: x86/mmu: Rename __rmap_write_protect() to rmap_write_protect()

The function formerly known as rmap_write_protect() has been renamed to
kvm_vcpu_write_protect_gfn(), so we can get rid of the double
underscores in front of __rmap_write_protect().

No functional change intended.
Reviewed-by: Ben Gardon <bgardon@google.com>
Reviewed-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: David Matlack <dmatlack@google.com>
Message-Id: <20220119230739.2234394-3-dmatlack@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent cf48f9e2
...@@ -1228,8 +1228,8 @@ static bool spte_write_protect(u64 *sptep, bool pt_protect) ...@@ -1228,8 +1228,8 @@ static bool spte_write_protect(u64 *sptep, bool pt_protect)
return mmu_spte_update(sptep, spte); return mmu_spte_update(sptep, spte);
} }
static bool __rmap_write_protect(struct kvm_rmap_head *rmap_head, static bool rmap_write_protect(struct kvm_rmap_head *rmap_head,
bool pt_protect) bool pt_protect)
{ {
u64 *sptep; u64 *sptep;
struct rmap_iterator iter; struct rmap_iterator iter;
...@@ -1309,7 +1309,7 @@ static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm, ...@@ -1309,7 +1309,7 @@ static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
while (mask) { while (mask) {
rmap_head = gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask), rmap_head = gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
PG_LEVEL_4K, slot); PG_LEVEL_4K, slot);
__rmap_write_protect(rmap_head, false); rmap_write_protect(rmap_head, false);
/* clear the first set bit */ /* clear the first set bit */
mask &= mask - 1; mask &= mask - 1;
...@@ -1408,7 +1408,7 @@ bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm, ...@@ -1408,7 +1408,7 @@ bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
if (kvm_memslots_have_rmaps(kvm)) { if (kvm_memslots_have_rmaps(kvm)) {
for (i = min_level; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) { for (i = min_level; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) {
rmap_head = gfn_to_rmap(gfn, i, slot); rmap_head = gfn_to_rmap(gfn, i, slot);
write_protected |= __rmap_write_protect(rmap_head, true); write_protected |= rmap_write_protect(rmap_head, true);
} }
} }
...@@ -5798,7 +5798,7 @@ static bool slot_rmap_write_protect(struct kvm *kvm, ...@@ -5798,7 +5798,7 @@ static bool slot_rmap_write_protect(struct kvm *kvm,
struct kvm_rmap_head *rmap_head, struct kvm_rmap_head *rmap_head,
const struct kvm_memory_slot *slot) const struct kvm_memory_slot *slot)
{ {
return __rmap_write_protect(rmap_head, false); return rmap_write_protect(rmap_head, false);
} }
void kvm_mmu_slot_remove_write_access(struct kvm *kvm, void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment