Commit a1419f8b authored by Sean Christopherson, committed by Paolo Bonzini

KVM: x86: Fold "write-protect large" use case into generic write-protect

Drop kvm_mmu_slot_largepage_remove_write_access() and refactor its sole
caller to use kvm_mmu_slot_remove_write_access().  Remove the now-unused
slot_handle_large_level() and slot_handle_all_level() helpers.

No functional change intended.
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20210213005015.1651772-14-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent b6e16ae5
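
In short, the dedicated large-page helper was always just the generic write-protect path with a floor of PG_LEVEL_2M. A standalone toy model of that equivalence (illustration only, not kernel code: remove_write_access() and the printf loop stand in for the real rmap walk, and the enum values mirror the kernel's PG_LEVEL_* constants):

	#include <stdio.h>

	/* Page levels, numbered as in the kernel's PG_LEVEL_* enum. */
	enum {
		PG_LEVEL_4K = 1,
		PG_LEVEL_2M,
		PG_LEVEL_1G,
		KVM_MAX_HUGEPAGE_LEVEL = PG_LEVEL_1G,
	};

	/* Generic write-protect: covers every level from min_level up. */
	static void remove_write_access(int min_level)
	{
		for (int level = min_level; level <= KVM_MAX_HUGEPAGE_LEVEL; level++)
			printf("write-protect SPTEs at level %d\n", level);
	}

	int main(void)
	{
		/* What the dropped large-page-only helper amounted to: */
		remove_write_access(PG_LEVEL_2M);

		/* The generic case it is folded into: */
		remove_write_access(PG_LEVEL_4K);
		return 0;
	}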
arch/x86/kvm/mmu/mmu.c
@@ -5204,22 +5204,6 @@ slot_handle_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
 				 lock_flush_tlb);
 }
 
-static __always_inline bool
-slot_handle_all_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
-		      slot_level_handler fn, bool lock_flush_tlb)
-{
-	return slot_handle_level(kvm, memslot, fn, PG_LEVEL_4K,
-				 KVM_MAX_HUGEPAGE_LEVEL, lock_flush_tlb);
-}
-
-static __always_inline bool
-slot_handle_large_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
-			slot_level_handler fn, bool lock_flush_tlb)
-{
-	return slot_handle_level(kvm, memslot, fn, PG_LEVEL_4K + 1,
-				 KVM_MAX_HUGEPAGE_LEVEL, lock_flush_tlb);
-}
-
 static __always_inline bool
 slot_handle_leaf(struct kvm *kvm, struct kvm_memory_slot *memslot,
 		 slot_level_handler fn, bool lock_flush_tlb)
@@ -5584,22 +5568,6 @@ void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
 		kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
 }
 
-void kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm,
-						struct kvm_memory_slot *memslot)
-{
-	bool flush;
-
-	write_lock(&kvm->mmu_lock);
-	flush = slot_handle_large_level(kvm, memslot, slot_rmap_write_protect,
-					false);
-	if (is_tdp_mmu_enabled(kvm))
-		flush |= kvm_tdp_mmu_wrprot_slot(kvm, memslot, PG_LEVEL_2M);
-	write_unlock(&kvm->mmu_lock);
-
-	if (flush)
-		kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
-}
-
 void kvm_mmu_zap_all(struct kvm *kvm)
 {
 	struct kvm_mmu_page *sp, *node;
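
Both wrappers were one-line conveniences around slot_handle_level(); per the deleted bodies above, a call such as slot_handle_all_level(kvm, memslot, fn, lock_flush_tlb) was exactly:

	slot_handle_level(kvm, memslot, fn, PG_LEVEL_4K,
			  KVM_MAX_HUGEPAGE_LEVEL, lock_flush_tlb);

and slot_handle_large_level() was the same with a start level of PG_LEVEL_4K + 1, i.e. the smallest huge-page level. With both gone, call sites spell out the level range directly.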
arch/x86/kvm/x86.c
@@ -10829,24 +10829,25 @@ static void kvm_mmu_slot_apply_flags(struct kvm *kvm,
 		 */
 		kvm_mmu_zap_collapsible_sptes(kvm, new);
 	} else {
+		/* By default, write-protect everything to log writes. */
+		int level = PG_LEVEL_4K;
+
+		if (kvm_x86_ops.cpu_dirty_log_size) {
 			/*
-			 * Large sptes are write-protected so they can be split on first
-			 * write. New large sptes cannot be created for this slot until
-			 * the end of the logging. See the comments in fast_page_fault().
-			 *
-			 * For small sptes, nothing is done if the dirty log is in the
-			 * initial-all-set state.  Otherwise, depending on whether pml
-			 * is enabled the D-bit or the W-bit will be cleared.
+			 * Clear all dirty bits, unless pages are treated as
+			 * dirty from the get-go.
 			 */
-		if (kvm_x86_ops.cpu_dirty_log_size) {
 			if (!kvm_dirty_log_manual_protect_and_init_set(kvm))
 				kvm_mmu_slot_leaf_clear_dirty(kvm, new);
-			kvm_mmu_slot_largepage_remove_write_access(kvm, new);
-		} else {
-			int level =
-				kvm_dirty_log_manual_protect_and_init_set(kvm) ?
-				PG_LEVEL_2M : PG_LEVEL_4K;
 
 			/*
+			 * Write-protect large pages on write so that dirty
+			 * logging happens at 4k granularity.  No need to
+			 * write-protect small SPTEs since write accesses are
+			 * logged by the CPU via dirty bits.
+			 */
+			level = PG_LEVEL_2M;
+		} else if (kvm_dirty_log_manual_protect_and_init_set(kvm)) {
+			/*
 			 * If we're with initial-all-set, we don't need
 			 * to write protect any small page because
@@ -10855,8 +10856,9 @@ static void kvm_mmu_slot_apply_flags(struct kvm *kvm,
 			 * so that the page split can happen lazily on
 			 * the first write to the huge page.
 			 */
-			kvm_mmu_slot_remove_write_access(kvm, new, level);
+			level = PG_LEVEL_2M;
 		}
+
+		kvm_mmu_slot_remove_write_access(kvm, new, level);
 	}
 }
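
Pieced together from the two hunks above, the else branch of kvm_mmu_slot_apply_flags() reads as follows after the patch (whitespace approximated; two unchanged comment lines that fall between the hunks are elided):

	} else {
		/* By default, write-protect everything to log writes. */
		int level = PG_LEVEL_4K;

		if (kvm_x86_ops.cpu_dirty_log_size) {
			/*
			 * Clear all dirty bits, unless pages are treated as
			 * dirty from the get-go.
			 */
			if (!kvm_dirty_log_manual_protect_and_init_set(kvm))
				kvm_mmu_slot_leaf_clear_dirty(kvm, new);

			/*
			 * Write-protect large pages on write so that dirty
			 * logging happens at 4k granularity.  No need to
			 * write-protect small SPTEs since write accesses are
			 * logged by the CPU via dirty bits.
			 */
			level = PG_LEVEL_2M;
		} else if (kvm_dirty_log_manual_protect_and_init_set(kvm)) {
			/*
			 * If we're with initial-all-set, we don't need
			 * to write protect any small page because
			 * [... two comment lines elided between the hunks ...]
			 * so that the page split can happen lazily on
			 * the first write to the huge page.
			 */
			level = PG_LEVEL_2M;
		}

		kvm_mmu_slot_remove_write_access(kvm, new, level);
	}

Either way, exactly one call to kvm_mmu_slot_remove_write_access() is made, with the minimum level deciding whether small SPTEs are write-protected too.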