Commit 4aa60221 authored by Marc Zyngier, committed by Greg Kroah-Hartman

ARM: 7768/1: prevent risks of out-of-bound access in ASID allocator

commit b8e4a474 upstream.

On a CPU that never ran anything, both the active and reserved ASID
fields are set to zero. In this case the ASID_TO_IDX() macro will
return -1, which is not a very useful value to index a bitmap.
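
For illustration only, a minimal standalone sketch of the underflow
(constants reduced for brevity; the harness is hypothetical and not part
of the kernel, but ASID_TO_IDX() mirrors the old macro):

  #include <stdio.h>

  #define ASID_BITS 8                     /* reduced for the example */
  #define ASID_MASK (~0ULL << ASID_BITS)

  /* The old mapping: ASID #1 lands on bit 0 of asid_map. */
  #define ASID_TO_IDX(asid) (((asid) & ~ASID_MASK) - 1)

  int main(void)
  {
          unsigned long long asid = 0;    /* CPU that never ran anything */

          /* (0 & ~ASID_MASK) - 1 wraps around: -1 is not a bitmap index. */
          printf("ASID_TO_IDX(0) = %lld\n", (long long)ASID_TO_IDX(asid));
          return 0;
  }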

Instead of trying to offset the ASID so that ASID #1 is actually
bit 0 in the asid_map bitmap, just always ignore bit 0 and start
the search from bit 1. This makes the code a bit more readable
and removes the risk of out-of-bound access.
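
A sketch of the fixed scheme, again standalone with reduced constants and
a hypothetical find_free() standing in for the kernel's
find_next_zero_bit(): bit n of the map now simply corresponds to ASID n,
and bit 0 is left permanently set aside.

  #include <stdio.h>

  #define ASID_BITS      8                /* reduced for the example */
  #define NUM_USER_ASIDS (1ULL << ASID_BITS)

  static unsigned char asid_map[NUM_USER_ASIDS];  /* one byte per bit, for clarity */

  /* Stand-in for find_next_zero_bit(map, size, start). */
  static unsigned long long find_free(unsigned long long start)
  {
          unsigned long long i;

          for (i = start; i < NUM_USER_ASIDS; i++)
                  if (!asid_map[i])
                          return i;
          return NUM_USER_ASIDS;          /* exhausted: caller bumps the generation */
  }

  int main(void)
  {
          /* Start the search at 1: ASID #0 is never handed out, so a
           * reserved_asids value of 0 only marks the unused bit 0. */
          unsigned long long asid = find_free(1);

          asid_map[asid] = 1;
          printf("first allocated ASID: %llu\n", asid);  /* prints 1 */
          return 0;
  }
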
Acked-by: Will Deacon <will.deacon@arm.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Reported-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent b7dc4032
@@ -39,10 +39,7 @@
  * non 64-bit operations.
  */
 #define ASID_FIRST_VERSION      (1ULL << ASID_BITS)
-#define NUM_USER_ASIDS          (ASID_FIRST_VERSION - 1)
-#define ASID_TO_IDX(asid)       ((asid & ~ASID_MASK) - 1)
-#define IDX_TO_ASID(idx)        ((idx + 1) & ~ASID_MASK)
+#define NUM_USER_ASIDS          ASID_FIRST_VERSION
 
 static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
 static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
@@ -137,7 +134,7 @@ static void flush_context(unsigned int cpu)
                          */
                         if (asid == 0)
                                 asid = per_cpu(reserved_asids, i);
-                        __set_bit(ASID_TO_IDX(asid), asid_map);
+                        __set_bit(asid & ~ASID_MASK, asid_map);
                 }
                 per_cpu(reserved_asids, i) = asid;
@@ -176,17 +173,19 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
                 /*
                  * Allocate a free ASID. If we can't find one, take a
                  * note of the currently active ASIDs and mark the TLBs
-                 * as requiring flushes.
+                 * as requiring flushes. We always count from ASID #1,
+                 * as we reserve ASID #0 to switch via TTBR0 and indicate
+                 * rollover events.
                  */
-                asid = find_first_zero_bit(asid_map, NUM_USER_ASIDS);
+                asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
                 if (asid == NUM_USER_ASIDS) {
                         generation = atomic64_add_return(ASID_FIRST_VERSION,
                                                          &asid_generation);
                         flush_context(cpu);
-                        asid = find_first_zero_bit(asid_map, NUM_USER_ASIDS);
+                        asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
                 }
                 __set_bit(asid, asid_map);
-                asid = generation | IDX_TO_ASID(asid);
+                asid |= generation;
                 cpumask_clear(mm_cpumask(mm));
         }