Commit c043eaaa authored by Sean Christopherson

KVM: x86/mmu: Snapshot shadow_phys_bits when kvm.ko is loaded

Snapshot shadow_phys_bits when kvm.ko is loaded, not when a vendor module
is loaded, to guard against usage of shadow_phys_bits before it is
initialized.  The computation isn't vendor specific in any way, i.e. there
is no reason to wait to snapshot the value until a vendor module is
loaded, nor is there any reason to recompute the value every time a vendor
module is loaded.
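
For context, the computation only consults CPUID and boot_cpu_data, never
vendor (VMX/SVM) state, which is what makes it safe to run at kvm.ko load
time.  As a rough, paraphrased sketch of what kvm_get_shadow_phys_bits()
does (not the authoritative implementation; see arch/x86/kvm for the real
definition):

	static inline u8 kvm_get_shadow_phys_bits(void)
	{
		/*
		 * boot_cpu_data.x86_phys_bits is reduced when MKTME or SME
		 * repurpose upper bits as key IDs; those bits are usable,
		 * not reserved, so read MAXPHYADDR directly from CPUID.
		 */
		if (likely(boot_cpu_data.extended_cpuid_level >= 0x80000008))
			return cpuid_eax(0x80000008) & 0xff;

		/* No MAXPHYADDR leaf; fall back to the kernel's value. */
		return boot_cpu_data.x86_phys_bits;
	}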

Opportunistically convert it from "read mostly" to "read-only after init".
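
For readers unfamiliar with the annotation: __ro_after_init data is mapped
read-only once init completes, so a stray write after that point faults
instead of silently corrupting the value.  A minimal, hypothetical sketch
(names invented for illustration, not taken from this patch):

	#include <linux/cache.h>	/* __ro_after_init */
	#include <linux/init.h>

	/* Written exactly once during init, read-only afterwards. */
	static u8 __ro_after_init snapshot_val;

	static int __init snapshot_example_init(void)
	{
		snapshot_val = 42;	/* legal: init has not finished */
		return 0;
	}
	core_initcall(snapshot_example_init);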

Link: https://lore.kernel.org/r/20240423221521.2923759-4-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
parent 52c47f58
@@ -61,7 +61,7 @@ static __always_inline u64 rsvd_bits(int s, int e)
  * The number of non-reserved physical address bits irrespective of features
  * that repurpose legal bits, e.g. MKTME.
  */
-extern u8 __read_mostly shadow_phys_bits;
+extern u8 __ro_after_init shadow_phys_bits;
 
 static inline gfn_t kvm_mmu_max_gfn(void)
 {
@@ -43,7 +43,7 @@ u64 __read_mostly shadow_acc_track_mask;
 
 u64 __read_mostly shadow_nonpresent_or_rsvd_mask;
 u64 __read_mostly shadow_nonpresent_or_rsvd_lower_gfn_mask;
-u8 __read_mostly shadow_phys_bits;
+u8 __ro_after_init shadow_phys_bits;
 
 void __init kvm_mmu_spte_module_init(void)
 {
@@ -55,6 +55,8 @@ void __init kvm_mmu_spte_module_init(void)
 	 * will change when the vendor module is (re)loaded.
 	 */
 	allow_mmio_caching = enable_mmio_caching;
+
+	shadow_phys_bits = kvm_get_shadow_phys_bits();
 }
 
 static u64 generation_mmio_spte_mask(u64 gen)
@@ -441,8 +443,6 @@ void kvm_mmu_reset_all_pte_masks(void)
 	u8 low_phys_bits;
 	u64 mask;
 
-	shadow_phys_bits = kvm_get_shadow_phys_bits();
-
 	/*
 	 * If the CPU has 46 or less physical address bits, then set an
 	 * appropriate mask to guard against L1TF attacks.  Otherwise, it is