Commit 351e5a70 authored by Venkatesh Pallipadi, committed by H. Peter Anvin

x86, mtrr: Support mtrr lookup for range spanning across MTRR range

mtrr_type_lookup(start, end) looked up the resultant MTRR type for the
[start:end] range, based on the fixed and all the variable MTRR ranges. It
checked for multiple variable MTRR ranges overlapping [start:end] and
returned the net type.

However, if the [start:end] range spanned across a variable MTRR range
boundary, mtrr_type_lookup returned the error value 0xFE. This was based on
the typical usage of mtrr_type_lookup in PAT mapping, where the region being
mapped would not normally span MTRR ranges, and on a desire to keep the code
simple.

Mark recently reported a problem with this limitation. When there are two
contiguous MTRRs of type "write-back" and a memory mapping covers a region
starting in one MTRR range and ending in the other, the mapping falls back
to "uncached" due to the above limitation.

The change below adds support for such lookups spanning multiple MTRR
ranges. mtrr_type_lookup is now a wrapper that dynamically splits such a
region into smaller chunks, each fitting within a single MTRR range, does a
__mtrr_type_lookup on each chunk, and combines the results, as sketched
below.
Reported-by: Mark Langsdorf <mark.langsdorf@amd.com>
Signed-off-by: Venkatesh Pallipadi <venki@google.com>
LKML-Reference: <1284159350-19841-3-git-send-email-venki@google.com>
Reviewed-by: Suresh Siddha <suresh.b.siddha@intel.com>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
parent a7f07cfb
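
A minimal user-space sketch of this split-and-repeat pattern, using a
hypothetical two-entry MTRR layout and simplified half-open range checks;
toy_lookup() and all addresses here are illustrative inventions, not the
kernel API:

#include <stdio.h>
#include <stdint.h>

#define TOY_UNCACHABLE	0
#define TOY_WRBACK	6

struct toy_mtrr {
	uint64_t base, size;
	uint8_t type;
};

/* Two contiguous 1 GB write-back ranges. */
static const struct toy_mtrr mtrrs[] = {
	{ 0x00000000ULL, 0x40000000ULL, TOY_WRBACK },
	{ 0x40000000ULL, 0x40000000ULL, TOY_WRBACK },
};

/*
 * Return the type for [start:end) if it lies inside a single range;
 * if it spans a range boundary, return the type of the first chunk,
 * set *partial_end to where the caller must resume, and set *repeat.
 */
static uint8_t toy_lookup(uint64_t start, uint64_t end,
			  uint64_t *partial_end, int *repeat)
{
	unsigned int i;

	*repeat = 0;
	for (i = 0; i < sizeof(mtrrs) / sizeof(mtrrs[0]); i++) {
		uint64_t lo = mtrrs[i].base;
		uint64_t hi = mtrrs[i].base + mtrrs[i].size;
		int start_in = (start >= lo && start < hi);
		int end_in = (end > lo && end <= hi);

		if (!start_in && !end_in)
			continue;
		if (start_in != end_in) {	/* spans a boundary */
			*partial_end = start_in ? hi : lo;
			*repeat = 1;
		}
		return mtrrs[i].type;
	}
	return TOY_UNCACHABLE;	/* default type for uncovered memory */
}

int main(void)
{
	uint64_t start = 0x3ff00000ULL, end = 0x40100000ULL, partial_end;
	int repeat;
	uint8_t type = toy_lookup(start, end, &partial_end, &repeat);

	while (repeat) {	/* same shape as the wrapper in the diff */
		start = partial_end;
		type = toy_lookup(start, end, &partial_end, &repeat);
		/* the real code merges chunk types via check_type_overlap() */
	}
	printf("effective type = %u\n", type);	/* prints 6 (write-back) */
	return 0;
}

Here the lookup [0x3ff00000:0x40100000] crosses the boundary at 0x40000000;
the first pass resolves the chunk inside the first MTRR and reports where to
resume, so the combined result stays write-back instead of falling back to
uncached.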
@@ -64,6 +64,18 @@ static inline void k8_check_syscfg_dram_mod_en(void)
 	}
 }
 
+/* Get the size of contiguous MTRR range */
+static u64 get_mtrr_size(u64 mask)
+{
+	u64 size;
+
+	mask >>= PAGE_SHIFT;
+	mask |= size_or_mask;
+	size = -mask;
+	size <<= PAGE_SHIFT;
+	return size;
+}
+
 /*
  * Check and return the effective type for MTRR-MTRR type overlap.
  * Returns 1 if the effective type is UNCACHEABLE, else returns 0
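
The arithmetic in get_mtrr_size() relies on the MTRR PHYSMASK encoding:
within the physical address width the mask is ~(size - 1), so once the bits
above the address width are filled in via size_or_mask, two's-complement
negation recovers the size. A user-space sketch of the same trick, assuming
a 36-bit physical address space and 4 KB pages (both values chosen for
illustration):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT	12
#define PHYS_BITS	36	/* assumed physical address width */

int main(void)
{
	/* size_or_mask: ones above the address width, in page units. */
	uint64_t size_or_mask = ~((1ULL << (PHYS_BITS - PAGE_SHIFT)) - 1);

	/* PHYSMASK of a 2 MB MTRR: ~(0x200000 - 1) within bits 35:12. */
	uint64_t mask = 0xFFFE00000ULL;
	uint64_t size;

	mask >>= PAGE_SHIFT;	/* byte mask -> page-frame mask      */
	mask |= size_or_mask;	/* fill bits above the address width */
	size = -mask;		/* negating ~(n - 1) gives n         */
	size <<= PAGE_SHIFT;	/* page frames -> bytes              */

	printf("size = %#llx\n", (unsigned long long)size);	/* 0x200000 */
	return 0;
}

The OR with size_or_mask matters because PHYSMASK only implements bits below
the CPU's physical address width; without filling the upper bits, the
negation would not yield the range size.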
@@ -92,17 +104,19 @@ static int check_type_overlap(u8 *prev, u8 *curr)
 }
 
 /*
- * Returns the effective MTRR type for the region
- * Error returns:
- * - 0xFE - when the range is "not entirely covered" by _any_ var range MTRR
- * - 0xFF - when MTRR is not enabled
+ * Error/Semi-error returns:
+ * 0xFF - when MTRR is not enabled
+ * *repeat == 1 implies [start:end] spanned across MTRR range and type returned
+ * corresponds only to [start:*partial_end].
+ * Caller has to lookup again for [*partial_end:end].
  */
-u8 mtrr_type_lookup(u64 start, u64 end)
+static u8 __mtrr_type_lookup(u64 start, u64 end, u64 *partial_end, int *repeat)
 {
 	int i;
 	u64 base, mask;
 	u8 prev_match, curr_match;
 
+	*repeat = 0;
 	if (!mtrr_state_set)
 		return 0xFF;
@@ -153,8 +167,34 @@ u8 mtrr_type_lookup(u64 start, u64 end)
 		start_state = ((start & mask) == (base & mask));
 		end_state = ((end & mask) == (base & mask));
 
-		if (start_state != end_state)
-			return 0xFE;
+		if (start_state != end_state) {
+			/*
+			 * We have start:end spanning across an MTRR.
+			 * We split the region into either
+			 * (start:mtrr_end) (mtrr_end:end)
+			 * or
+			 * (start:mtrr_start) (mtrr_start:end)
+			 * depending on kind of overlap.
+			 * Return the type for first region and a pointer to
+			 * the start of second region so that caller will
+			 * lookup again on the second region.
+			 * Note: This way we handle multiple overlaps as well.
+			 */
+			if (start_state)
+				*partial_end = base + get_mtrr_size(mask);
+			else
+				*partial_end = base;
+
+			if (unlikely(*partial_end <= start)) {
+				WARN_ON(1);
+				*partial_end = start + PAGE_SIZE;
+			}
+
+			end = *partial_end - 1; /* end is inclusive */
+			*repeat = 1;
+		}
 
 		if ((start & mask) != (base & mask))
 			continue;
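
To make the split point concrete, with the same illustrative numbers as the
sketch above: for a hypothetical 2 MB MTRR at base 0x80000000 (PHYSMASK
0xFFFE00000 with a 36-bit address width), a lookup for [0x80100000:0x80300000]
starts inside the MTRR but ends beyond it, so start_state is true and
*partial_end = base + get_mtrr_size(mask) = 0x80000000 + 0x200000 =
0x80200000. The first pass then resolves [0x80100000:0x801FFFFF] (end is
inclusive) against this MTRR, and the caller repeats the lookup for
[0x80200000:0x80300000].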
@@ -180,6 +220,36 @@ u8 mtrr_type_lookup(u64 start, u64 end)
 	return mtrr_state.def_type;
 }
 
+/*
+ * Returns the effective MTRR type for the region
+ * Error return:
+ * 0xFF - when MTRR is not enabled
+ */
+u8 mtrr_type_lookup(u64 start, u64 end)
+{
+	u8 type, prev_type;
+	int repeat;
+	u64 partial_end;
+
+	type = __mtrr_type_lookup(start, end, &partial_end, &repeat);
+
+	/*
+	 * Common path is with repeat = 0.
+	 * However, we can have cases where [start:end] spans across some
+	 * MTRR range. Do repeated lookups for that case here.
+	 */
+	while (repeat) {
+		prev_type = type;
+		start = partial_end;
+		type = __mtrr_type_lookup(start, end, &partial_end, &repeat);
+
+		if (check_type_overlap(&prev_type, &type))
+			return type;
+	}
+
+	return type;
+}
+
 /* Get the MSR pair relating to a var range */
 static void
 get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr)
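
A note on the combining step: check_type_overlap() computes the effective
type of two adjacent chunk types and, per its comment earlier in this file,
returns 1 when the effective type is UNCACHEABLE. Since no further chunk can
improve on uncacheable, the wrapper can return immediately at that point
instead of looking up the remainder of the range.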