Commit 4ad7149e authored by Juergen Gross, committed by Borislav Petkov

x86/mtrr: Split MTRR-specific handling from cache dis/enabling

Split the MTRR-specific actions from cache_disable() and cache_enable()
into new functions mtrr_disable() and mtrr_enable().
Signed-off-by: Juergen Gross <jgross@suse.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
Link: https://lore.kernel.org/r/20221102074713.21493-6-jgross@suse.com
Signed-off-by: Borislav Petkov <bp@suse.de>
parent d5f66d5d
...@@ -48,6 +48,8 @@ extern void mtrr_aps_init(void); ...@@ -48,6 +48,8 @@ extern void mtrr_aps_init(void);
extern void mtrr_bp_restore(void); extern void mtrr_bp_restore(void);
extern int mtrr_trim_uncached_memory(unsigned long end_pfn); extern int mtrr_trim_uncached_memory(unsigned long end_pfn);
extern int amd_special_default_mtrr(void); extern int amd_special_default_mtrr(void);
void mtrr_disable(void);
void mtrr_enable(void);
# else # else
static inline u8 mtrr_type_lookup(u64 addr, u64 end, u8 *uniform) static inline u8 mtrr_type_lookup(u64 addr, u64 end, u8 *uniform)
{ {
...@@ -87,6 +89,8 @@ static inline void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi) ...@@ -87,6 +89,8 @@ static inline void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi)
#define set_mtrr_aps_delayed_init() do {} while (0) #define set_mtrr_aps_delayed_init() do {} while (0)
#define mtrr_aps_init() do {} while (0) #define mtrr_aps_init() do {} while (0)
#define mtrr_bp_restore() do {} while (0) #define mtrr_bp_restore() do {} while (0)
#define mtrr_disable() do {} while (0)
#define mtrr_enable() do {} while (0)
# endif # endif
#ifdef CONFIG_COMPAT #ifdef CONFIG_COMPAT
......
...@@ -716,6 +716,21 @@ static unsigned long set_mtrr_state(void) ...@@ -716,6 +716,21 @@ static unsigned long set_mtrr_state(void)
return change_mask; return change_mask;
} }
/*
 * Disable MTRRs and set the default memory type to uncached.
 *
 * Saves the current MSR_MTRRdefType value into the file-static
 * deftype_lo/deftype_hi pair so that mtrr_enable() can restore it.
 * Called from cache_disable() when X86_FEATURE_MTRR is set, after
 * caches have been disabled and the TLB flushed.
 *
 * NOTE(review): ~0xcff clears the default-type field (bits 0-7) plus
 * the FE (bit 10) and E (bit 11) enable bits of IA32_MTRR_DEF_TYPE —
 * per Intel SDM layout; confirm against the MSR definition.
 */
void mtrr_disable(void)
{
/* Save MTRR state */
rdmsr(MSR_MTRRdefType, deftype_lo, deftype_hi);
/* Disable MTRRs, and set the default type to uncached */
mtrr_wrmsr(MSR_MTRRdefType, deftype_lo & ~0xcff, deftype_hi);
}
/*
 * Re-enable MTRRs by restoring MSR_MTRRdefType from the state saved
 * in deftype_lo/deftype_hi by a prior mtrr_disable() call.
 * Called from cache_enable() when X86_FEATURE_MTRR is set, before
 * caches are re-enabled.
 */
void mtrr_enable(void)
{
/* Intel (P6) standard MTRRs */
mtrr_wrmsr(MSR_MTRRdefType, deftype_lo, deftype_hi);
}
/* /*
* Disable and enable caches. Needed for changing MTRRs and the PAT MSR. * Disable and enable caches. Needed for changing MTRRs and the PAT MSR.
* *
...@@ -764,11 +779,8 @@ void cache_disable(void) __acquires(cache_disable_lock) ...@@ -764,11 +779,8 @@ void cache_disable(void) __acquires(cache_disable_lock)
count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL); count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
flush_tlb_local(); flush_tlb_local();
/* Save MTRR state */ if (cpu_feature_enabled(X86_FEATURE_MTRR))
rdmsr(MSR_MTRRdefType, deftype_lo, deftype_hi); mtrr_disable();
/* Disable MTRRs, and set the default type to uncached */
mtrr_wrmsr(MSR_MTRRdefType, deftype_lo & ~0xcff, deftype_hi);
/* Again, only flush caches if we have to. */ /* Again, only flush caches if we have to. */
if (!static_cpu_has(X86_FEATURE_SELFSNOOP)) if (!static_cpu_has(X86_FEATURE_SELFSNOOP))
...@@ -781,8 +793,8 @@ void cache_enable(void) __releases(cache_disable_lock) ...@@ -781,8 +793,8 @@ void cache_enable(void) __releases(cache_disable_lock)
count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL); count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
flush_tlb_local(); flush_tlb_local();
/* Intel (P6) standard MTRRs */ if (cpu_feature_enabled(X86_FEATURE_MTRR))
mtrr_wrmsr(MSR_MTRRdefType, deftype_lo, deftype_hi); mtrr_enable();
/* Enable caches */ /* Enable caches */
write_cr0(read_cr0() & ~X86_CR0_CD); write_cr0(read_cr0() & ~X86_CR0_CD);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment