Commit f8bd9f25 authored by Juergen Gross, committed by Borislav Petkov

x86/mtrr: Simplify mtrr_ops initialization

The way mtrr_if is initialized with the correct mtrr_ops structure is
quite weird.

Simplify that by dropping the vendor specific init functions and the
mtrr_ops[] array. Replace those with direct assignments of the related
vendor specific ops array to mtrr_if.

Note that a direct assignment is okay even for 64-bit builds, where the
symbol isn't present, as the related code will be subject to "dead code
elimination" due to how cpu_feature_enabled() is implemented.
Signed-off-by: Juergen Gross <jgross@suse.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
Link: https://lore.kernel.org/r/20221102074713.21493-17-jgross@suse.com
Signed-off-by: Borislav Petkov <bp@suse.de>
parent 30f89e52
...@@ -109,7 +109,7 @@ amd_validate_add_page(unsigned long base, unsigned long size, unsigned int type) ...@@ -109,7 +109,7 @@ amd_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
return 0; return 0;
} }
static const struct mtrr_ops amd_mtrr_ops = { const struct mtrr_ops amd_mtrr_ops = {
.vendor = X86_VENDOR_AMD, .vendor = X86_VENDOR_AMD,
.set = amd_set_mtrr, .set = amd_set_mtrr,
.get = amd_get_mtrr, .get = amd_get_mtrr,
...@@ -117,9 +117,3 @@ static const struct mtrr_ops amd_mtrr_ops = { ...@@ -117,9 +117,3 @@ static const struct mtrr_ops amd_mtrr_ops = {
.validate_add_page = amd_validate_add_page, .validate_add_page = amd_validate_add_page,
.have_wrcomb = positive_have_wrcomb, .have_wrcomb = positive_have_wrcomb,
}; };
/*
 * Removed by this commit: boot-time hook that registered the AMD
 * (pre-Athlon K6) mtrr_ops with the core via set_mtrr_ops().
 * Always returned 0; the return value was never checked by init_ifs().
 * Replaced by a direct `mtrr_if = &amd_mtrr_ops` assignment in
 * mtrr_bp_init().
 */
int __init amd_init_mtrr(void)
{
set_mtrr_ops(&amd_mtrr_ops);
return 0;
}
...@@ -111,7 +111,7 @@ centaur_validate_add_page(unsigned long base, unsigned long size, unsigned int t ...@@ -111,7 +111,7 @@ centaur_validate_add_page(unsigned long base, unsigned long size, unsigned int t
return 0; return 0;
} }
static const struct mtrr_ops centaur_mtrr_ops = { const struct mtrr_ops centaur_mtrr_ops = {
.vendor = X86_VENDOR_CENTAUR, .vendor = X86_VENDOR_CENTAUR,
.set = centaur_set_mcr, .set = centaur_set_mcr,
.get = centaur_get_mcr, .get = centaur_get_mcr,
...@@ -119,9 +119,3 @@ static const struct mtrr_ops centaur_mtrr_ops = { ...@@ -119,9 +119,3 @@ static const struct mtrr_ops centaur_mtrr_ops = {
.validate_add_page = centaur_validate_add_page, .validate_add_page = centaur_validate_add_page,
.have_wrcomb = positive_have_wrcomb, .have_wrcomb = positive_have_wrcomb,
}; };
/*
 * Removed by this commit: boot-time hook that registered the Centaur
 * (MCR-based) mtrr_ops with the core via set_mtrr_ops().
 * Always returned 0.  Replaced by a direct `mtrr_if = &centaur_mtrr_ops`
 * assignment in mtrr_bp_init().
 */
int __init centaur_init_mtrr(void)
{
set_mtrr_ops(&centaur_mtrr_ops);
return 0;
}
...@@ -234,7 +234,7 @@ static void cyrix_set_arr(unsigned int reg, unsigned long base, ...@@ -234,7 +234,7 @@ static void cyrix_set_arr(unsigned int reg, unsigned long base,
post_set(); post_set();
} }
static const struct mtrr_ops cyrix_mtrr_ops = { const struct mtrr_ops cyrix_mtrr_ops = {
.vendor = X86_VENDOR_CYRIX, .vendor = X86_VENDOR_CYRIX,
.set = cyrix_set_arr, .set = cyrix_set_arr,
.get = cyrix_get_arr, .get = cyrix_get_arr,
...@@ -242,9 +242,3 @@ static const struct mtrr_ops cyrix_mtrr_ops = { ...@@ -242,9 +242,3 @@ static const struct mtrr_ops cyrix_mtrr_ops = {
.validate_add_page = generic_validate_add_page, .validate_add_page = generic_validate_add_page,
.have_wrcomb = positive_have_wrcomb, .have_wrcomb = positive_have_wrcomb,
}; };
/*
 * Removed by this commit: boot-time hook that registered the Cyrix
 * (ARR-based) mtrr_ops with the core via set_mtrr_ops().
 * Always returned 0.  Replaced by a direct `mtrr_if = &cyrix_mtrr_ops`
 * assignment in mtrr_bp_init().
 */
int __init cyrix_init_mtrr(void)
{
set_mtrr_ops(&cyrix_mtrr_ops);
return 0;
}
...@@ -69,16 +69,8 @@ static DEFINE_MUTEX(mtrr_mutex); ...@@ -69,16 +69,8 @@ static DEFINE_MUTEX(mtrr_mutex);
u64 size_or_mask, size_and_mask; u64 size_or_mask, size_and_mask;
static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __ro_after_init;
const struct mtrr_ops *mtrr_if; const struct mtrr_ops *mtrr_if;
/*
 * Removed by this commit: stored @ops into the mtrr_ops[] lookup table,
 * indexed by its ->vendor field.  Entries with vendor 0 or out of the
 * X86_VENDOR_NUM range were silently ignored.  Obsolete now that
 * mtrr_bp_init() assigns the vendor ops structures to mtrr_if directly.
 */
void __init set_mtrr_ops(const struct mtrr_ops *ops)
{
if (ops->vendor && ops->vendor < X86_VENDOR_NUM)
mtrr_ops[ops->vendor] = ops;
}
/* Returns non-zero if we have the write-combining memory type */ /* Returns non-zero if we have the write-combining memory type */
static int have_wrcomb(void) static int have_wrcomb(void)
{ {
...@@ -582,20 +574,6 @@ int arch_phys_wc_index(int handle) ...@@ -582,20 +574,6 @@ int arch_phys_wc_index(int handle)
} }
EXPORT_SYMBOL_GPL(arch_phys_wc_index); EXPORT_SYMBOL_GPL(arch_phys_wc_index);
/*
 * HACK ALERT!
 * These should be called implicitly, but we can't yet until all the initcall
 * stuff is done...
 */
/*
 * Removed by this commit: called the three vendor registration hooks on
 * 32-bit builds only (the vendor drivers are not built on CONFIG_X86_64).
 * Was invoked from mtrr_bp_init(); the whole indirection is dropped in
 * favor of assigning the vendor ops to mtrr_if directly.
 */
static void __init init_ifs(void)
{
#ifndef CONFIG_X86_64
amd_init_mtrr();
cyrix_init_mtrr();
centaur_init_mtrr();
#endif
}
/* The suspend/resume methods are only for CPU without MTRR. CPU using generic /* The suspend/resume methods are only for CPU without MTRR. CPU using generic
* MTRR driver doesn't require this * MTRR driver doesn't require this
*/ */
...@@ -653,8 +631,6 @@ void __init mtrr_bp_init(void) ...@@ -653,8 +631,6 @@ void __init mtrr_bp_init(void)
{ {
u32 phys_addr; u32 phys_addr;
init_ifs();
phys_addr = 32; phys_addr = 32;
if (boot_cpu_has(X86_FEATURE_MTRR)) { if (boot_cpu_has(X86_FEATURE_MTRR)) {
...@@ -695,21 +671,21 @@ void __init mtrr_bp_init(void) ...@@ -695,21 +671,21 @@ void __init mtrr_bp_init(void)
case X86_VENDOR_AMD: case X86_VENDOR_AMD:
if (cpu_feature_enabled(X86_FEATURE_K6_MTRR)) { if (cpu_feature_enabled(X86_FEATURE_K6_MTRR)) {
/* Pre-Athlon (K6) AMD CPU MTRRs */ /* Pre-Athlon (K6) AMD CPU MTRRs */
mtrr_if = mtrr_ops[X86_VENDOR_AMD]; mtrr_if = &amd_mtrr_ops;
size_or_mask = SIZE_OR_MASK_BITS(32); size_or_mask = SIZE_OR_MASK_BITS(32);
size_and_mask = 0; size_and_mask = 0;
} }
break; break;
case X86_VENDOR_CENTAUR: case X86_VENDOR_CENTAUR:
if (cpu_feature_enabled(X86_FEATURE_CENTAUR_MCR)) { if (cpu_feature_enabled(X86_FEATURE_CENTAUR_MCR)) {
mtrr_if = mtrr_ops[X86_VENDOR_CENTAUR]; mtrr_if = &centaur_mtrr_ops;
size_or_mask = SIZE_OR_MASK_BITS(32); size_or_mask = SIZE_OR_MASK_BITS(32);
size_and_mask = 0; size_and_mask = 0;
} }
break; break;
case X86_VENDOR_CYRIX: case X86_VENDOR_CYRIX:
if (cpu_feature_enabled(X86_FEATURE_CYRIX_ARR)) { if (cpu_feature_enabled(X86_FEATURE_CYRIX_ARR)) {
mtrr_if = mtrr_ops[X86_VENDOR_CYRIX]; mtrr_if = &cyrix_mtrr_ops;
size_or_mask = SIZE_OR_MASK_BITS(32); size_or_mask = SIZE_OR_MASK_BITS(32);
size_and_mask = 0; size_and_mask = 0;
} }
......
...@@ -51,8 +51,6 @@ void fill_mtrr_var_range(unsigned int index, ...@@ -51,8 +51,6 @@ void fill_mtrr_var_range(unsigned int index,
u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi); u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi);
bool get_mtrr_state(void); bool get_mtrr_state(void);
extern void __init set_mtrr_ops(const struct mtrr_ops *ops);
extern u64 size_or_mask, size_and_mask; extern u64 size_or_mask, size_and_mask;
extern const struct mtrr_ops *mtrr_if; extern const struct mtrr_ops *mtrr_if;
...@@ -66,10 +64,10 @@ void mtrr_state_warn(void); ...@@ -66,10 +64,10 @@ void mtrr_state_warn(void);
const char *mtrr_attrib_to_str(int x); const char *mtrr_attrib_to_str(int x);
void mtrr_wrmsr(unsigned, unsigned, unsigned); void mtrr_wrmsr(unsigned, unsigned, unsigned);
/* CPU specific mtrr init functions */ /* CPU specific mtrr_ops vectors. */
int amd_init_mtrr(void); extern const struct mtrr_ops amd_mtrr_ops;
int cyrix_init_mtrr(void); extern const struct mtrr_ops cyrix_mtrr_ops;
int centaur_init_mtrr(void); extern const struct mtrr_ops centaur_mtrr_ops;
extern int changed_by_mtrr_cleanup; extern int changed_by_mtrr_cleanup;
extern int mtrr_cleanup(unsigned address_bits); extern int mtrr_cleanup(unsigned address_bits);
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment