Commit dc43fc75 authored by Linus Torvalds

Merge tag 'x86_mtrr_for_v6.5' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 mtrr updates from Borislav Petkov:
 "A serious scrubbing of the MTRR code including adding a new map
  mechanism in order to look up the memory type of a region easily.

   It also addresses memory range lookup issues such as returning an
   invalid memory type. Furthermore, it handles the decoupling of PAT
   from MTRR more naturally.

  All work by Juergen Gross"
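
As a quick sketch of the new lookup contract described above (editor's illustration, not code from this merge; use_type() and split_to_smaller_pages() are hypothetical stand-ins):

    u8 uniform;
    u8 type = mtrr_type_lookup(start, end, &uniform);

    if (uniform)
            use_type(type);           /* one cache mode covers [start, end) */
    else
            split_to_smaller_pages(); /* cannot rely on a single type */

mtrr_type_lookup() no longer returns MTRR_TYPE_INVALID; with MTRR support compiled out, the stub reports MTRR_TYPE_UNCACHABLE with *uniform set to 1 (see the <asm/mtrr.h> hunk below).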

* tag 'x86_mtrr_for_v6.5' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/xen: Set default memory type for PV guests to WB
  x86/mtrr: Unify debugging printing
  x86/mtrr: Remove unused code
  x86/mm: Only check uniform after calling mtrr_type_lookup()
  x86/mtrr: Don't let mtrr_type_lookup() return MTRR_TYPE_INVALID
  x86/mtrr: Use new cache_map in mtrr_type_lookup()
  x86/mtrr: Add mtrr=debug command line option
  x86/mtrr: Construct a memory map with cache modes
  x86/mtrr: Add get_effective_type() service function
  x86/mtrr: Allocate mtrr_value array dynamically
  x86/mtrr: Move 32-bit code from mtrr.c to legacy.c
  x86/mtrr: Have only one set_mtrr() variant
  x86/mtrr: Replace vendor tests in MTRR code
  x86/xen: Set MTRR state when running as Xen PV initial domain
  x86/hyperv: Set MTRR state when running as SEV-SNP Hyper-V guest
  x86/mtrr: Support setting MTRR state for software defined MTRRs
  x86/mtrr: Replace size_or_mask and size_and_mask with a much easier concept
  x86/mtrr: Remove physical address size calculation
parents 4baa098a 30d65d1b
......@@ -3433,6 +3433,10 @@
[HW] Make the MicroTouch USB driver use raw coordinates
('y', default) or cooked coordinates ('n')
mtrr=debug [X86]
Enable printing debug information related to MTRR
registers at boot time.
mtrr_chunk_size=nn[KMG] [X86]
Used for MTRR cleanup. It is the largest continuous chunk
that could hold holes, a.k.a. UC entries.
......
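For illustration, a hedged example combining the two options documented above on the kernel command line (the chunk size is an arbitrary choice):

    mtrr=debug mtrr_chunk_size=256M

With mtrr=debug, the Dprintk() calls in the MTRR code (see the cleanup.c and mtrr.h hunks below) print via pr_info() during boot.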
......@@ -17,6 +17,7 @@
#include <asm/mem_encrypt.h>
#include <asm/mshyperv.h>
#include <asm/hypervisor.h>
#include <asm/mtrr.h>
#ifdef CONFIG_AMD_MEM_ENCRYPT
......@@ -372,6 +373,9 @@ void __init hv_vtom_init(void)
x86_platform.guest.enc_cache_flush_required = hv_vtom_cache_flush_required;
x86_platform.guest.enc_tlb_flush_required = hv_vtom_tlb_flush_required;
x86_platform.guest.enc_status_change_finish = hv_vtom_set_host_visibility;
/* Set WB as the default cache mode. */
mtrr_overwrite_state(NULL, 0, MTRR_TYPE_WRBACK);
}
#endif /* CONFIG_AMD_MEM_ENCRYPT */
......
......@@ -23,14 +23,43 @@
#ifndef _ASM_X86_MTRR_H
#define _ASM_X86_MTRR_H
#include <linux/bits.h>
#include <uapi/asm/mtrr.h>
/* Defines for hardware MTRR registers. */
#define MTRR_CAP_VCNT GENMASK(7, 0)
#define MTRR_CAP_FIX BIT_MASK(8)
#define MTRR_CAP_WC BIT_MASK(10)
#define MTRR_DEF_TYPE_TYPE GENMASK(7, 0)
#define MTRR_DEF_TYPE_FE BIT_MASK(10)
#define MTRR_DEF_TYPE_E BIT_MASK(11)
#define MTRR_DEF_TYPE_ENABLE (MTRR_DEF_TYPE_FE | MTRR_DEF_TYPE_E)
#define MTRR_DEF_TYPE_DISABLE ~(MTRR_DEF_TYPE_TYPE | MTRR_DEF_TYPE_ENABLE)
#define MTRR_PHYSBASE_TYPE GENMASK(7, 0)
#define MTRR_PHYSBASE_RSVD GENMASK(11, 8)
#define MTRR_PHYSMASK_RSVD GENMASK(10, 0)
#define MTRR_PHYSMASK_V BIT_MASK(11)
struct mtrr_state_type {
struct mtrr_var_range var_ranges[MTRR_MAX_VAR_RANGES];
mtrr_type fixed_ranges[MTRR_NUM_FIXED_RANGES];
unsigned char enabled;
bool have_fixed;
mtrr_type def_type;
};
/*
* The following functions are for use by other drivers that cannot use
* arch_phys_wc_add and arch_phys_wc_del.
*/
# ifdef CONFIG_MTRR
void mtrr_bp_init(void);
void mtrr_overwrite_state(struct mtrr_var_range *var, unsigned int num_var,
mtrr_type def_type);
extern u8 mtrr_type_lookup(u64 addr, u64 end, u8 *uniform);
extern void mtrr_save_fixed_ranges(void *);
extern void mtrr_save_state(void);
......@@ -40,7 +69,6 @@ extern int mtrr_add_page(unsigned long base, unsigned long size,
unsigned int type, bool increment);
extern int mtrr_del(int reg, unsigned long base, unsigned long size);
extern int mtrr_del_page(int reg, unsigned long base, unsigned long size);
extern void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi);
extern void mtrr_bp_restore(void);
extern int mtrr_trim_uncached_memory(unsigned long end_pfn);
extern int amd_special_default_mtrr(void);
......@@ -48,12 +76,21 @@ void mtrr_disable(void);
void mtrr_enable(void);
void mtrr_generic_set_state(void);
# else
static inline void mtrr_overwrite_state(struct mtrr_var_range *var,
unsigned int num_var,
mtrr_type def_type)
{
}
static inline u8 mtrr_type_lookup(u64 addr, u64 end, u8 *uniform)
{
/*
* Return no-MTRRs:
* Return the default MTRR type, without any known other types in
* that range.
*/
return MTRR_TYPE_INVALID;
*uniform = 1;
return MTRR_TYPE_UNCACHABLE;
}
#define mtrr_save_fixed_ranges(arg) do {} while (0)
#define mtrr_save_state() do {} while (0)
......@@ -79,9 +116,6 @@ static inline int mtrr_trim_uncached_memory(unsigned long end_pfn)
{
return 0;
}
static inline void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi)
{
}
#define mtrr_bp_init() do {} while (0)
#define mtrr_bp_restore() do {} while (0)
#define mtrr_disable() do {} while (0)
......@@ -121,7 +155,8 @@ struct mtrr_gentry32 {
#endif /* CONFIG_COMPAT */
/* Bit fields for enabled in struct mtrr_state_type */
#define MTRR_STATE_MTRR_FIXED_ENABLED 0x01
#define MTRR_STATE_MTRR_ENABLED 0x02
#define MTRR_STATE_SHIFT 10
#define MTRR_STATE_MTRR_FIXED_ENABLED (MTRR_DEF_TYPE_FE >> MTRR_STATE_SHIFT)
#define MTRR_STATE_MTRR_ENABLED (MTRR_DEF_TYPE_E >> MTRR_STATE_SHIFT)
#endif /* _ASM_X86_MTRR_H */
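An arithmetic check of the rewritten macros (editor's note, not part of the patch):

    /*
     * MTRR_DEF_TYPE_FE = BIT(10) = 0x400 and MTRR_DEF_TYPE_E = BIT(11) = 0x800,
     * so with MTRR_STATE_SHIFT = 10:
     *   MTRR_STATE_MTRR_FIXED_ENABLED = 0x400 >> 10 = 0x01
     *   MTRR_STATE_MTRR_ENABLED       = 0x800 >> 10 = 0x02
     * i.e. the same values the old literal definitions had.
     */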
......@@ -81,14 +81,6 @@ typedef __u8 mtrr_type;
#define MTRR_NUM_FIXED_RANGES 88
#define MTRR_MAX_VAR_RANGES 256
struct mtrr_state_type {
struct mtrr_var_range var_ranges[MTRR_MAX_VAR_RANGES];
mtrr_type fixed_ranges[MTRR_NUM_FIXED_RANGES];
unsigned char enabled;
unsigned char have_fixed;
mtrr_type def_type;
};
#define MTRRphysBase_MSR(reg) (0x200 + 2 * (reg))
#define MTRRphysMask_MSR(reg) (0x200 + 2 * (reg) + 1)
......@@ -115,9 +107,9 @@ struct mtrr_state_type {
#define MTRR_NUM_TYPES 7
/*
* Invalid MTRR memory type. mtrr_type_lookup() returns this value when
* MTRRs are disabled. Note, this value is allocated from the reserved
* values (0x7-0xff) of the MTRR memory types.
* Invalid MTRR memory type. No longer used outside of MTRR code.
* Note, this value is allocated from the reserved values (0x7-0xff) of
* the MTRR memory types.
*/
#define MTRR_TYPE_INVALID 0xff
......
# SPDX-License-Identifier: GPL-2.0-only
obj-y := mtrr.o if.o generic.o cleanup.o
obj-$(CONFIG_X86_32) += amd.o cyrix.o centaur.o
obj-$(CONFIG_X86_32) += amd.o cyrix.o centaur.o legacy.o
......@@ -110,7 +110,7 @@ amd_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
}
const struct mtrr_ops amd_mtrr_ops = {
.vendor = X86_VENDOR_AMD,
.var_regs = 2,
.set = amd_set_mtrr,
.get = amd_get_mtrr,
.get_free_region = generic_get_free_region,
......
......@@ -45,15 +45,6 @@ centaur_get_free_region(unsigned long base, unsigned long size, int replace_reg)
return -ENOSPC;
}
/*
* Report boot time MCR setups
*/
void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi)
{
centaur_mcr[mcr].low = lo;
centaur_mcr[mcr].high = hi;
}
static void
centaur_get_mcr(unsigned int reg, unsigned long *base,
unsigned long *size, mtrr_type * type)
......@@ -112,7 +103,7 @@ centaur_validate_add_page(unsigned long base, unsigned long size, unsigned int t
}
const struct mtrr_ops centaur_mtrr_ops = {
.vendor = X86_VENDOR_CENTAUR,
.var_regs = 8,
.set = centaur_set_mcr,
.get = centaur_get_mcr,
.get_free_region = centaur_get_free_region,
......
......@@ -55,9 +55,6 @@ static int __initdata nr_range;
static struct var_mtrr_range_state __initdata range_state[RANGE_NUM];
static int __initdata debug_print;
#define Dprintk(x...) do { if (debug_print) pr_debug(x); } while (0)
#define BIOS_BUG_MSG \
"WARNING: BIOS bug: VAR MTRR %d contains strange UC entry under 1M, check with your system vendor!\n"
......@@ -79,12 +76,11 @@ x86_get_mtrr_mem_range(struct range *range, int nr_range,
nr_range = add_range_with_merge(range, RANGE_NUM, nr_range,
base, base + size);
}
if (debug_print) {
pr_debug("After WB checking\n");
Dprintk("After WB checking\n");
for (i = 0; i < nr_range; i++)
pr_debug("MTRR MAP PFN: %016llx - %016llx\n",
Dprintk("MTRR MAP PFN: %016llx - %016llx\n",
range[i].start, range[i].end);
}
/* Take out UC ranges: */
for (i = 0; i < num_var_ranges; i++) {
......@@ -112,24 +108,22 @@ x86_get_mtrr_mem_range(struct range *range, int nr_range,
subtract_range(range, RANGE_NUM, extra_remove_base,
extra_remove_base + extra_remove_size);
if (debug_print) {
pr_debug("After UC checking\n");
Dprintk("After UC checking\n");
for (i = 0; i < RANGE_NUM; i++) {
if (!range[i].end)
continue;
pr_debug("MTRR MAP PFN: %016llx - %016llx\n",
Dprintk("MTRR MAP PFN: %016llx - %016llx\n",
range[i].start, range[i].end);
}
}
/* sort the ranges */
nr_range = clean_sort_range(range, RANGE_NUM);
if (debug_print) {
pr_debug("After sorting\n");
Dprintk("After sorting\n");
for (i = 0; i < nr_range; i++)
pr_debug("MTRR MAP PFN: %016llx - %016llx\n",
Dprintk("MTRR MAP PFN: %016llx - %016llx\n",
range[i].start, range[i].end);
}
return nr_range;
}
......@@ -164,16 +158,9 @@ static int __init enable_mtrr_cleanup_setup(char *str)
}
early_param("enable_mtrr_cleanup", enable_mtrr_cleanup_setup);
static int __init mtrr_cleanup_debug_setup(char *str)
{
debug_print = 1;
return 0;
}
early_param("mtrr_cleanup_debug", mtrr_cleanup_debug_setup);
static void __init
set_var_mtrr(unsigned int reg, unsigned long basek, unsigned long sizek,
unsigned char type, unsigned int address_bits)
unsigned char type)
{
u32 base_lo, base_hi, mask_lo, mask_hi;
u64 base, mask;
......@@ -183,7 +170,7 @@ set_var_mtrr(unsigned int reg, unsigned long basek, unsigned long sizek,
return;
}
mask = (1ULL << address_bits) - 1;
mask = (1ULL << boot_cpu_data.x86_phys_bits) - 1;
mask &= ~((((u64)sizek) << 10) - 1);
base = ((u64)basek) << 10;
......@@ -209,7 +196,7 @@ save_var_mtrr(unsigned int reg, unsigned long basek, unsigned long sizek,
range_state[reg].type = type;
}
static void __init set_var_mtrr_all(unsigned int address_bits)
static void __init set_var_mtrr_all(void)
{
unsigned long basek, sizek;
unsigned char type;
......@@ -220,7 +207,7 @@ static void __init set_var_mtrr_all(unsigned int address_bits)
sizek = range_state[reg].size_pfn << (PAGE_SHIFT - 10);
type = range_state[reg].type;
set_var_mtrr(reg, basek, sizek, type, address_bits);
set_var_mtrr(reg, basek, sizek, type);
}
}
......@@ -267,7 +254,7 @@ range_to_mtrr(unsigned int reg, unsigned long range_startk,
align = max_align;
sizek = 1UL << align;
if (debug_print) {
if (mtrr_debug) {
char start_factor = 'K', size_factor = 'K';
unsigned long start_base, size_base;
......@@ -542,7 +529,7 @@ static void __init print_out_mtrr_range_state(void)
start_base = to_size_factor(start_base, &start_factor);
type = range_state[i].type;
pr_debug("reg %d, base: %ld%cB, range: %ld%cB, type %s\n",
Dprintk("reg %d, base: %ld%cB, range: %ld%cB, type %s\n",
i, start_base, start_factor,
size_base, size_factor,
(type == MTRR_TYPE_UNCACHABLE) ? "UC" :
......@@ -680,7 +667,7 @@ static int __init mtrr_search_optimal_index(void)
return index_good;
}
int __init mtrr_cleanup(unsigned address_bits)
int __init mtrr_cleanup(void)
{
unsigned long x_remove_base, x_remove_size;
unsigned long base, size, def, dummy;
......@@ -689,7 +676,10 @@ int __init mtrr_cleanup(unsigned address_bits)
int index_good;
int i;
if (!is_cpu(INTEL) || enable_mtrr_cleanup < 1)
if (!mtrr_enabled())
return 0;
if (!cpu_feature_enabled(X86_FEATURE_MTRR) || enable_mtrr_cleanup < 1)
return 0;
rdmsr(MSR_MTRRdefType, def, dummy);
......@@ -711,7 +701,7 @@ int __init mtrr_cleanup(unsigned address_bits)
return 0;
/* Print original var MTRRs at first, for debugging: */
pr_debug("original variable MTRRs\n");
Dprintk("original variable MTRRs\n");
print_out_mtrr_range_state();
memset(range, 0, sizeof(range));
......@@ -742,8 +732,8 @@ int __init mtrr_cleanup(unsigned address_bits)
mtrr_print_out_one_result(i);
if (!result[i].bad) {
set_var_mtrr_all(address_bits);
pr_debug("New variable MTRRs\n");
set_var_mtrr_all();
Dprintk("New variable MTRRs\n");
print_out_mtrr_range_state();
return 1;
}
......@@ -763,7 +753,7 @@ int __init mtrr_cleanup(unsigned address_bits)
mtrr_calc_range_state(chunk_size, gran_size,
x_remove_base, x_remove_size, i);
if (debug_print) {
if (mtrr_debug) {
mtrr_print_out_one_result(i);
pr_info("\n");
}
......@@ -786,8 +776,8 @@ int __init mtrr_cleanup(unsigned address_bits)
gran_size = result[i].gran_sizek;
gran_size <<= 10;
x86_setup_var_mtrrs(range, nr_range, chunk_size, gran_size);
set_var_mtrr_all(address_bits);
pr_debug("New variable MTRRs\n");
set_var_mtrr_all();
Dprintk("New variable MTRRs\n");
print_out_mtrr_range_state();
return 1;
} else {
......@@ -802,7 +792,7 @@ int __init mtrr_cleanup(unsigned address_bits)
return 0;
}
#else
int __init mtrr_cleanup(unsigned address_bits)
int __init mtrr_cleanup(void)
{
return 0;
}
......@@ -882,15 +872,18 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn)
/* extra one for all 0 */
int num[MTRR_NUM_TYPES + 1];
if (!mtrr_enabled())
return 0;
/*
* Make sure we only trim uncachable memory on machines that
* support the Intel MTRR architecture:
*/
if (!is_cpu(INTEL) || disable_mtrr_trim)
if (!cpu_feature_enabled(X86_FEATURE_MTRR) || disable_mtrr_trim)
return 0;
rdmsr(MSR_MTRRdefType, def, dummy);
def &= 0xff;
def &= MTRR_DEF_TYPE_TYPE;
if (def != MTRR_TYPE_UNCACHABLE)
return 0;
......
......@@ -235,7 +235,7 @@ static void cyrix_set_arr(unsigned int reg, unsigned long base,
}
const struct mtrr_ops cyrix_mtrr_ops = {
.vendor = X86_VENDOR_CYRIX,
.var_regs = 8,
.set = cyrix_set_arr,
.get = cyrix_get_arr,
.get_free_region = cyrix_get_free_region,
......
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>
#include <asm/cpufeature.h>
#include <asm/mtrr.h>
#include <asm/processor.h>
#include "mtrr.h"
void mtrr_set_if(void)
{
switch (boot_cpu_data.x86_vendor) {
case X86_VENDOR_AMD:
/* Pre-Athlon (K6) AMD CPU MTRRs */
if (cpu_feature_enabled(X86_FEATURE_K6_MTRR))
mtrr_if = &amd_mtrr_ops;
break;
case X86_VENDOR_CENTAUR:
if (cpu_feature_enabled(X86_FEATURE_CENTAUR_MCR))
mtrr_if = &centaur_mtrr_ops;
break;
case X86_VENDOR_CYRIX:
if (cpu_feature_enabled(X86_FEATURE_CYRIX_ARR))
mtrr_if = &cyrix_mtrr_ops;
break;
default:
break;
}
}
/*
 * The suspend/resume methods are only for CPUs without MTRRs. CPUs using
 * the generic MTRR driver don't require this.
*/
struct mtrr_value {
mtrr_type ltype;
unsigned long lbase;
unsigned long lsize;
};
static struct mtrr_value *mtrr_value;
static int mtrr_save(void)
{
int i;
if (!mtrr_value)
return -ENOMEM;
for (i = 0; i < num_var_ranges; i++) {
mtrr_if->get(i, &mtrr_value[i].lbase,
&mtrr_value[i].lsize,
&mtrr_value[i].ltype);
}
return 0;
}
static void mtrr_restore(void)
{
int i;
for (i = 0; i < num_var_ranges; i++) {
if (mtrr_value[i].lsize) {
mtrr_if->set(i, mtrr_value[i].lbase,
mtrr_value[i].lsize,
mtrr_value[i].ltype);
}
}
}
static struct syscore_ops mtrr_syscore_ops = {
.suspend = mtrr_save,
.resume = mtrr_restore,
};
void mtrr_register_syscore(void)
{
mtrr_value = kcalloc(num_var_ranges, sizeof(*mtrr_value), GFP_KERNEL);
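	/* mtrr_save() returns -ENOMEM if this allocation failed. */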
/*
 * These CPUs have no MTRRs and seem not to support SMP. They have
 * specific drivers; we use a tricky method to support
 * suspend/resume for them.
 *
 * TBD: is there any system with such a CPU that supports
 * suspend/resume? If not, we should remove the code.
*/
register_syscore_ops(&mtrr_syscore_ops);
}
......@@ -59,15 +59,9 @@
#define MTRR_TO_PHYS_WC_OFFSET 1000
u32 num_var_ranges;
static bool mtrr_enabled(void)
{
return !!mtrr_if;
}
unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES];
static DEFINE_MUTEX(mtrr_mutex);
u64 size_or_mask, size_and_mask;
DEFINE_MUTEX(mtrr_mutex);
const struct mtrr_ops *mtrr_if;
......@@ -105,21 +99,6 @@ static int have_wrcomb(void)
return mtrr_if->have_wrcomb ? mtrr_if->have_wrcomb() : 0;
}
/* This function returns the number of variable MTRRs */
static void __init set_num_var_ranges(bool use_generic)
{
unsigned long config = 0, dummy;
if (use_generic)
rdmsr(MSR_MTRRcap, config, dummy);
else if (is_cpu(AMD) || is_cpu(HYGON))
config = 2;
else if (is_cpu(CYRIX) || is_cpu(CENTAUR))
config = 8;
num_var_ranges = config & 0xff;
}
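A hedged worked example of where this removed logic goes (the MSR value is hypothetical):

    /*
     * If MTRRcap reads 0x50a (MTRR_CAP_WC | MTRR_CAP_FIX | a VCNT of 10),
     * the generic path in mtrr_bp_init() now computes
     *   num_var_ranges = 0x50a & MTRR_CAP_VCNT = 10.
     * Vendor drivers without MTRRcap supply the count via the new
     * mtrr_ops.var_regs field instead (2 for AMD, 8 for Cyrix and
     * Centaur, per the hunks above).
     */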
static void __init init_table(void)
{
int i, max;
......@@ -194,20 +173,8 @@ static inline int types_compatible(mtrr_type type1, mtrr_type type2)
* Note that the mechanism is the same for UP systems, too; all the SMP stuff
* becomes nops.
*/
static void
set_mtrr(unsigned int reg, unsigned long base, unsigned long size, mtrr_type type)
{
struct set_mtrr_data data = { .smp_reg = reg,
.smp_base = base,
.smp_size = size,
.smp_type = type
};
stop_machine(mtrr_rendezvous_handler, &data, cpu_online_mask);
}
static void set_mtrr_cpuslocked(unsigned int reg, unsigned long base,
unsigned long size, mtrr_type type)
static void set_mtrr(unsigned int reg, unsigned long base, unsigned long size,
mtrr_type type)
{
struct set_mtrr_data data = { .smp_reg = reg,
.smp_base = base,
......@@ -216,6 +183,8 @@ static void set_mtrr_cpuslocked(unsigned int reg, unsigned long base,
};
stop_machine_cpuslocked(mtrr_rendezvous_handler, &data, cpu_online_mask);
generic_rebuild_map();
}
/**
......@@ -337,7 +306,7 @@ int mtrr_add_page(unsigned long base, unsigned long size,
/* Search for an empty MTRR */
i = mtrr_if->get_free_region(base, size, replace);
if (i >= 0) {
set_mtrr_cpuslocked(i, base, size, type);
set_mtrr(i, base, size, type);
if (likely(replace < 0)) {
mtrr_usage_table[i] = 1;
} else {
......@@ -345,7 +314,7 @@ int mtrr_add_page(unsigned long base, unsigned long size,
if (increment)
mtrr_usage_table[i]++;
if (unlikely(replace != i)) {
set_mtrr_cpuslocked(replace, 0, 0, 0);
set_mtrr(replace, 0, 0, 0);
mtrr_usage_table[replace] = 0;
}
}
......@@ -363,7 +332,7 @@ static int mtrr_check(unsigned long base, unsigned long size)
{
if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) {
pr_warn("size and base must be multiples of 4 kiB\n");
pr_debug("size: 0x%lx base: 0x%lx\n", size, base);
Dprintk("size: 0x%lx base: 0x%lx\n", size, base);
dump_stack();
return -1;
}
......@@ -454,8 +423,7 @@ int mtrr_del_page(int reg, unsigned long base, unsigned long size)
}
}
if (reg < 0) {
pr_debug("no MTRR for %lx000,%lx000 found\n",
base, size);
Dprintk("no MTRR for %lx000,%lx000 found\n", base, size);
goto out;
}
}
......@@ -473,7 +441,7 @@ int mtrr_del_page(int reg, unsigned long base, unsigned long size)
goto out;
}
if (--mtrr_usage_table[reg] < 1)
set_mtrr_cpuslocked(reg, 0, 0, 0);
set_mtrr(reg, 0, 0, 0);
error = reg;
out:
mutex_unlock(&mtrr_mutex);
......@@ -574,136 +542,54 @@ int arch_phys_wc_index(int handle)
}
EXPORT_SYMBOL_GPL(arch_phys_wc_index);
/* The suspend/resume methods are only for CPU without MTRR. CPU using generic
* MTRR driver doesn't require this
*/
struct mtrr_value {
mtrr_type ltype;
unsigned long lbase;
unsigned long lsize;
};
static struct mtrr_value mtrr_value[MTRR_MAX_VAR_RANGES];
static int mtrr_save(void)
{
int i;
for (i = 0; i < num_var_ranges; i++) {
mtrr_if->get(i, &mtrr_value[i].lbase,
&mtrr_value[i].lsize,
&mtrr_value[i].ltype);
}
return 0;
}
static void mtrr_restore(void)
{
int i;
for (i = 0; i < num_var_ranges; i++) {
if (mtrr_value[i].lsize) {
set_mtrr(i, mtrr_value[i].lbase,
mtrr_value[i].lsize,
mtrr_value[i].ltype);
}
}
}
static struct syscore_ops mtrr_syscore_ops = {
.suspend = mtrr_save,
.resume = mtrr_restore,
};
int __initdata changed_by_mtrr_cleanup;
#define SIZE_OR_MASK_BITS(n) (~((1ULL << ((n) - PAGE_SHIFT)) - 1))
/**
* mtrr_bp_init - initialize mtrrs on the boot CPU
* mtrr_bp_init - initialize MTRRs on the boot CPU
*
* This needs to be called early; before any of the other CPUs are
* initialized (i.e. before smp_init()).
*
*/
void __init mtrr_bp_init(void)
{
bool generic_mtrrs = cpu_feature_enabled(X86_FEATURE_MTRR);
const char *why = "(not available)";
u32 phys_addr;
phys_addr = 32;
unsigned long config, dummy;
if (boot_cpu_has(X86_FEATURE_MTRR)) {
mtrr_if = &generic_mtrr_ops;
size_or_mask = SIZE_OR_MASK_BITS(36);
size_and_mask = 0x00f00000;
phys_addr = 36;
phys_hi_rsvd = GENMASK(31, boot_cpu_data.x86_phys_bits - 32);
if (!generic_mtrrs && mtrr_state.enabled) {
/*
* This is an AMD specific MSR, but we assume(hope?) that
* Intel will implement it too when they extend the address
* bus of the Xeon.
*/
if (cpuid_eax(0x80000000) >= 0x80000008) {
phys_addr = cpuid_eax(0x80000008) & 0xff;
/* CPUID workaround for Intel 0F33/0F34 CPU */
if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
boot_cpu_data.x86 == 0xF &&
boot_cpu_data.x86_model == 0x3 &&
(boot_cpu_data.x86_stepping == 0x3 ||
boot_cpu_data.x86_stepping == 0x4))
phys_addr = 36;
size_or_mask = SIZE_OR_MASK_BITS(phys_addr);
size_and_mask = ~size_or_mask & 0xfffff00000ULL;
} else if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR &&
boot_cpu_data.x86 == 6) {
/*
* VIA C* family have Intel style MTRRs,
* but don't support PAE
* Software overwrite of MTRR state, only for generic case.
* Note that X86_FEATURE_MTRR has been reset in this case.
*/
size_or_mask = SIZE_OR_MASK_BITS(32);
size_and_mask = 0;
phys_addr = 32;
}
} else {
switch (boot_cpu_data.x86_vendor) {
case X86_VENDOR_AMD:
if (cpu_feature_enabled(X86_FEATURE_K6_MTRR)) {
/* Pre-Athlon (K6) AMD CPU MTRRs */
mtrr_if = &amd_mtrr_ops;
size_or_mask = SIZE_OR_MASK_BITS(32);
size_and_mask = 0;
}
break;
case X86_VENDOR_CENTAUR:
if (cpu_feature_enabled(X86_FEATURE_CENTAUR_MCR)) {
mtrr_if = &centaur_mtrr_ops;
size_or_mask = SIZE_OR_MASK_BITS(32);
size_and_mask = 0;
}
break;
case X86_VENDOR_CYRIX:
if (cpu_feature_enabled(X86_FEATURE_CYRIX_ARR)) {
mtrr_if = &cyrix_mtrr_ops;
size_or_mask = SIZE_OR_MASK_BITS(32);
size_and_mask = 0;
}
break;
default:
break;
}
init_table();
mtrr_build_map();
pr_info("MTRRs set to read-only\n");
return;
}
if (generic_mtrrs)
mtrr_if = &generic_mtrr_ops;
else
mtrr_set_if();
if (mtrr_enabled()) {
set_num_var_ranges(mtrr_if == &generic_mtrr_ops);
/* Get the number of variable MTRR ranges. */
if (mtrr_if == &generic_mtrr_ops)
rdmsr(MSR_MTRRcap, config, dummy);
else
config = mtrr_if->var_regs;
num_var_ranges = config & MTRR_CAP_VCNT;
init_table();
if (mtrr_if == &generic_mtrr_ops) {
/* BIOS may override */
if (get_mtrr_state()) {
memory_caching_control |= CACHE_MTRR;
changed_by_mtrr_cleanup = mtrr_cleanup(phys_addr);
changed_by_mtrr_cleanup = mtrr_cleanup();
mtrr_build_map();
} else {
mtrr_if = NULL;
why = "by BIOS";
......@@ -730,8 +616,14 @@ void mtrr_save_state(void)
smp_call_function_single(first_cpu, mtrr_save_fixed_ranges, NULL, 1);
}
static int __init mtrr_init_finialize(void)
static int __init mtrr_init_finalize(void)
{
/*
* Map might exist if mtrr_overwrite_state() has been called or if
* mtrr_enabled() returns true.
*/
mtrr_copy_map();
if (!mtrr_enabled())
return 0;
......@@ -741,16 +633,8 @@ static int __init mtrr_init_finialize(void)
return 0;
}
/*
* The CPU has no MTRR and seems to not support SMP. They have
* specific drivers, we use a tricky method to support
* suspend/resume for them.
*
* TBD: is there any system with such CPU which supports
* suspend/resume? If no, we should remove the code.
*/
register_syscore_ops(&mtrr_syscore_ops);
mtrr_register_syscore();
return 0;
}
subsys_initcall(mtrr_init_finialize);
subsys_initcall(mtrr_init_finalize);
......@@ -10,10 +10,13 @@
#define MTRR_CHANGE_MASK_VARIABLE 0x02
#define MTRR_CHANGE_MASK_DEFTYPE 0x04
extern bool mtrr_debug;
#define Dprintk(x...) do { if (mtrr_debug) pr_info(x); } while (0)
extern unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES];
struct mtrr_ops {
u32 vendor;
u32 var_regs;
void (*set)(unsigned int reg, unsigned long base,
unsigned long size, mtrr_type type);
void (*get)(unsigned int reg, unsigned long *base,
......@@ -51,18 +54,26 @@ void fill_mtrr_var_range(unsigned int index,
u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi);
bool get_mtrr_state(void);
extern u64 size_or_mask, size_and_mask;
extern const struct mtrr_ops *mtrr_if;
#define is_cpu(vnd) (mtrr_if && mtrr_if->vendor == X86_VENDOR_##vnd)
extern struct mutex mtrr_mutex;
extern unsigned int num_var_ranges;
extern u64 mtrr_tom2;
extern struct mtrr_state_type mtrr_state;
extern u32 phys_hi_rsvd;
void mtrr_state_warn(void);
const char *mtrr_attrib_to_str(int x);
void mtrr_wrmsr(unsigned, unsigned, unsigned);
#ifdef CONFIG_X86_32
void mtrr_set_if(void);
void mtrr_register_syscore(void);
#else
static inline void mtrr_set_if(void) { }
static inline void mtrr_register_syscore(void) { }
#endif
void mtrr_build_map(void);
void mtrr_copy_map(void);
/* CPU specific mtrr_ops vectors. */
extern const struct mtrr_ops amd_mtrr_ops;
......@@ -70,4 +81,14 @@ extern const struct mtrr_ops cyrix_mtrr_ops;
extern const struct mtrr_ops centaur_mtrr_ops;
extern int changed_by_mtrr_cleanup;
extern int mtrr_cleanup(unsigned address_bits);
extern int mtrr_cleanup(void);
/*
* Must be used by code which uses mtrr_if to call platform-specific
* MTRR manipulation functions.
*/
static inline bool mtrr_enabled(void)
{
return !!mtrr_if;
}
void generic_rebuild_map(void);
......@@ -1037,6 +1037,8 @@ void __init setup_arch(char **cmdline_p)
/*
* VMware detection requires dmi to be available, so this
* needs to be done after dmi_setup(), for the boot CPU.
* For some guest types (Xen PV, SEV-SNP, TDX) it is required to be
* called before cache_bp_init() for setting up MTRR state.
*/
init_hypervisor_platform();
......
......@@ -702,14 +702,8 @@ void p4d_clear_huge(p4d_t *p4d)
* pud_set_huge - setup kernel PUD mapping
*
* MTRRs can override PAT memory types with 4KiB granularity. Therefore, this
* function sets up a huge page only if any of the following conditions are met:
*
* - MTRRs are disabled, or
*
* - MTRRs are enabled and the range is completely covered by a single MTRR, or
*
* - MTRRs are enabled and the corresponding MTRR memory type is WB, which
* has no effect on the requested PAT memory type.
* function sets up a huge page only if the complete range has the same MTRR
* caching mode.
*
* Callers should try to decrease page size (1GB -> 2MB -> 4K) if the bigger
* page mapping attempt fails.
......@@ -718,11 +712,10 @@ void p4d_clear_huge(p4d_t *p4d)
*/
int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
{
u8 mtrr, uniform;
u8 uniform;
mtrr = mtrr_type_lookup(addr, addr + PUD_SIZE, &uniform);
if ((mtrr != MTRR_TYPE_INVALID) && (!uniform) &&
(mtrr != MTRR_TYPE_WRBACK))
mtrr_type_lookup(addr, addr + PUD_SIZE, &uniform);
if (!uniform)
return 0;
/* Bail out if we are we on a populated non-leaf entry: */
......@@ -745,11 +738,10 @@ int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
*/
int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
{
u8 mtrr, uniform;
u8 uniform;
mtrr = mtrr_type_lookup(addr, addr + PMD_SIZE, &uniform);
if ((mtrr != MTRR_TYPE_INVALID) && (!uniform) &&
(mtrr != MTRR_TYPE_WRBACK)) {
mtrr_type_lookup(addr, addr + PMD_SIZE, &uniform);
if (!uniform) {
pr_warn_once("%s: Cannot satisfy [mem %#010llx-%#010llx] with a huge-page mapping due to MTRR override.\n",
__func__, addr, addr + PMD_SIZE);
return 0;
......
......@@ -68,6 +68,7 @@
#include <asm/reboot.h>
#include <asm/hypervisor.h>
#include <asm/mach_traps.h>
#include <asm/mtrr.h>
#include <asm/mwait.h>
#include <asm/pci_x86.h>
#include <asm/cpu.h>
......@@ -119,6 +120,54 @@ static int __init parse_xen_msr_safe(char *str)
}
early_param("xen_msr_safe", parse_xen_msr_safe);
/* Get MTRR settings from Xen and put them into mtrr_state. */
static void __init xen_set_mtrr_data(void)
{
#ifdef CONFIG_MTRR
struct xen_platform_op op = {
.cmd = XENPF_read_memtype,
.interface_version = XENPF_INTERFACE_VERSION,
};
unsigned int reg;
unsigned long mask;
uint32_t eax, width;
static struct mtrr_var_range var[MTRR_MAX_VAR_RANGES] __initdata;
/* Get physical address width (only 64-bit cpus supported). */
width = 36;
eax = cpuid_eax(0x80000000);
if ((eax >> 16) == 0x8000 && eax >= 0x80000008) {
eax = cpuid_eax(0x80000008);
width = eax & 0xff;
}
for (reg = 0; reg < MTRR_MAX_VAR_RANGES; reg++) {
op.u.read_memtype.reg = reg;
if (HYPERVISOR_platform_op(&op))
break;
/*
* Only called in dom0, which has all RAM PFNs mapped at
* RAM MFNs, and all PCI space etc. is identity mapped.
* This means we can treat MFN == PFN regarding MTRR settings.
*/
var[reg].base_lo = op.u.read_memtype.type;
var[reg].base_lo |= op.u.read_memtype.mfn << PAGE_SHIFT;
var[reg].base_hi = op.u.read_memtype.mfn >> (32 - PAGE_SHIFT);
mask = ~((op.u.read_memtype.nr_mfns << PAGE_SHIFT) - 1);
mask &= (1UL << width) - 1;
if (mask)
mask |= MTRR_PHYSMASK_V;
var[reg].mask_lo = mask;
var[reg].mask_hi = mask >> 32;
}
/* Only overwrite MTRR state if any MTRR could be obtained from Xen. */
if (reg)
mtrr_overwrite_state(var, reg, MTRR_TYPE_UNCACHABLE);
#endif
}
static void __init xen_pv_init_platform(void)
{
/* PV guests can't operate virtio devices without grants. */
......@@ -135,6 +184,11 @@ static void __init xen_pv_init_platform(void)
/* pvclock is in shared info area */
xen_init_time_ops();
if (xen_initial_domain())
xen_set_mtrr_data();
else
mtrr_overwrite_state(NULL, 0, MTRR_TYPE_WRBACK);
}
static void __init xen_pv_guest_late_init(void)
......