Commit 93e083d4 authored by David Matlack's avatar David Matlack Committed by Paolo Bonzini

KVM: x86/mmu: Rename __gfn_to_rmap to gfn_to_rmap

gfn_to_rmap was removed in the previous patch so there is no need to
retain the double underscore on __gfn_to_rmap.
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: David Matlack <dmatlack@google.com>
Message-Id: <20210804222844.1419481-7-dmatlack@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 601f8af0
...@@ -1035,7 +1035,7 @@ static bool pte_list_destroy(struct kvm_rmap_head *rmap_head) ...@@ -1035,7 +1035,7 @@ static bool pte_list_destroy(struct kvm_rmap_head *rmap_head)
return true; return true;
} }
static struct kvm_rmap_head *__gfn_to_rmap(gfn_t gfn, int level, static struct kvm_rmap_head *gfn_to_rmap(gfn_t gfn, int level,
const struct kvm_memory_slot *slot) const struct kvm_memory_slot *slot)
{ {
unsigned long idx; unsigned long idx;
...@@ -1061,7 +1061,7 @@ static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn) ...@@ -1061,7 +1061,7 @@ static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
sp = sptep_to_sp(spte); sp = sptep_to_sp(spte);
kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn); kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn);
slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
rmap_head = __gfn_to_rmap(gfn, sp->role.level, slot); rmap_head = gfn_to_rmap(gfn, sp->role.level, slot);
return pte_list_add(vcpu, spte, rmap_head); return pte_list_add(vcpu, spte, rmap_head);
} }
...@@ -1085,7 +1085,7 @@ static void rmap_remove(struct kvm *kvm, u64 *spte) ...@@ -1085,7 +1085,7 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
slots = kvm_memslots_for_spte_role(kvm, sp->role); slots = kvm_memslots_for_spte_role(kvm, sp->role);
slot = __gfn_to_memslot(slots, gfn); slot = __gfn_to_memslot(slots, gfn);
rmap_head = __gfn_to_rmap(gfn, sp->role.level, slot); rmap_head = gfn_to_rmap(gfn, sp->role.level, slot);
__pte_list_remove(spte, rmap_head); __pte_list_remove(spte, rmap_head);
} }
...@@ -1307,7 +1307,7 @@ static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm, ...@@ -1307,7 +1307,7 @@ static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
return; return;
while (mask) { while (mask) {
rmap_head = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask), rmap_head = gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
PG_LEVEL_4K, slot); PG_LEVEL_4K, slot);
__rmap_write_protect(kvm, rmap_head, false); __rmap_write_protect(kvm, rmap_head, false);
...@@ -1340,7 +1340,7 @@ static void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm, ...@@ -1340,7 +1340,7 @@ static void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
return; return;
while (mask) { while (mask) {
rmap_head = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask), rmap_head = gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
PG_LEVEL_4K, slot); PG_LEVEL_4K, slot);
__rmap_clear_dirty(kvm, rmap_head, slot); __rmap_clear_dirty(kvm, rmap_head, slot);
...@@ -1407,7 +1407,7 @@ bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm, ...@@ -1407,7 +1407,7 @@ bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
if (kvm_memslots_have_rmaps(kvm)) { if (kvm_memslots_have_rmaps(kvm)) {
for (i = min_level; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) { for (i = min_level; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) {
rmap_head = __gfn_to_rmap(gfn, i, slot); rmap_head = gfn_to_rmap(gfn, i, slot);
write_protected |= __rmap_write_protect(kvm, rmap_head, true); write_protected |= __rmap_write_protect(kvm, rmap_head, true);
} }
} }
...@@ -1502,9 +1502,8 @@ rmap_walk_init_level(struct slot_rmap_walk_iterator *iterator, int level) ...@@ -1502,9 +1502,8 @@ rmap_walk_init_level(struct slot_rmap_walk_iterator *iterator, int level)
{ {
iterator->level = level; iterator->level = level;
iterator->gfn = iterator->start_gfn; iterator->gfn = iterator->start_gfn;
iterator->rmap = __gfn_to_rmap(iterator->gfn, level, iterator->slot); iterator->rmap = gfn_to_rmap(iterator->gfn, level, iterator->slot);
iterator->end_rmap = __gfn_to_rmap(iterator->end_gfn, level, iterator->end_rmap = gfn_to_rmap(iterator->end_gfn, level, iterator->slot);
iterator->slot);
} }
static void static void
...@@ -1630,7 +1629,7 @@ static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn) ...@@ -1630,7 +1629,7 @@ static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
sp = sptep_to_sp(spte); sp = sptep_to_sp(spte);
slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
rmap_head = __gfn_to_rmap(gfn, sp->role.level, slot); rmap_head = gfn_to_rmap(gfn, sp->role.level, slot);
kvm_unmap_rmapp(vcpu->kvm, rmap_head, NULL, gfn, sp->role.level, __pte(0)); kvm_unmap_rmapp(vcpu->kvm, rmap_head, NULL, gfn, sp->role.level, __pte(0));
kvm_flush_remote_tlbs_with_address(vcpu->kvm, sp->gfn, kvm_flush_remote_tlbs_with_address(vcpu->kvm, sp->gfn,
......
...@@ -147,7 +147,7 @@ static void inspect_spte_has_rmap(struct kvm *kvm, u64 *sptep) ...@@ -147,7 +147,7 @@ static void inspect_spte_has_rmap(struct kvm *kvm, u64 *sptep)
return; return;
} }
rmap_head = __gfn_to_rmap(gfn, rev_sp->role.level, slot); rmap_head = gfn_to_rmap(gfn, rev_sp->role.level, slot);
if (!rmap_head->val) { if (!rmap_head->val) {
if (!__ratelimit(&ratelimit_state)) if (!__ratelimit(&ratelimit_state))
return; return;
...@@ -200,7 +200,7 @@ static void audit_write_protection(struct kvm *kvm, struct kvm_mmu_page *sp) ...@@ -200,7 +200,7 @@ static void audit_write_protection(struct kvm *kvm, struct kvm_mmu_page *sp)
slots = kvm_memslots_for_spte_role(kvm, sp->role); slots = kvm_memslots_for_spte_role(kvm, sp->role);
slot = __gfn_to_memslot(slots, sp->gfn); slot = __gfn_to_memslot(slots, sp->gfn);
rmap_head = __gfn_to_rmap(sp->gfn, PG_LEVEL_4K, slot); rmap_head = gfn_to_rmap(sp->gfn, PG_LEVEL_4K, slot);
for_each_rmap_spte(rmap_head, &iter, sptep) { for_each_rmap_spte(rmap_head, &iter, sptep) {
if (is_writable_pte(*sptep)) if (is_writable_pte(*sptep))
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment