ppc32: fix a possible race in pte_free()

Another processor could still be walking the page tables within the PTE page
that is about to be freed. Synchronize with hash_page by taking and releasing
the hash table lock before the page is freed.
parent cc3ecc6e
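The idea behind the fix: on SMP, hash_page dereferences the Linux page tables only while it holds mmu_hash_lock, so taking and immediately dropping that same lock in pte_free()/pte_free_kernel() is enough to wait out any CPU that might still be reading the page. Below is a minimal user-space analogue of this "pass through the lock before freeing" pattern; it is an illustration only, and every name in it is hypothetical except mmu_hash_lock and hash_page_sync(), which it merely mimics.

/*
 * User-space analogue of the fix (illustration only, not kernel code).
 * The "walker" stands in for hash_page: it dereferences the shared page
 * only while holding walk_lock.  The "freer" stands in for pte_free():
 * it unhooks the page, passes through the same lock once, and only then
 * frees the memory.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

static pthread_mutex_t walk_lock = PTHREAD_MUTEX_INITIALIZER;
static _Atomic(int *) shared_page;

static void *walker(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&walk_lock);
        int *p = atomic_load(&shared_page);
        if (p)
                (void)p[0];             /* safe: freer waits on walk_lock */
        pthread_mutex_unlock(&walk_lock);
        return NULL;
}

static void free_shared_page(void)
{
        /* unhook the page first, as the page-table entry is cleared
         * before pte_free() runs */
        int *old = atomic_exchange(&shared_page, (int *)NULL);

        /* the hash_page_sync() step: any walker that loaded the old
         * pointer is still inside walk_lock, so take and drop it once */
        pthread_mutex_lock(&walk_lock);
        pthread_mutex_unlock(&walk_lock);

        free(old);                      /* no walker can still see it */
}

int main(void)
{
        shared_page = malloc(4096);

        pthread_t t;
        pthread_create(&t, NULL, walker, NULL);
        free_shared_page();
        pthread_join(t, NULL);
        return 0;
}

The same reasoning is what the patch relies on in the kernel: because hash_page never touches the PTEs outside its mmu_hash_lock critical section, an empty lock/unlock in pte_free() is sufficient and no extra reference counting is needed.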
@@ -36,6 +36,32 @@
        .comm   mmu_hash_lock,4
#endif /* CONFIG_SMP */

/*
 * Sync CPUs with hash_page taking & releasing the hash
 * table lock
 */
#ifdef CONFIG_SMP
        .text
_GLOBAL(hash_page_sync)
        lis     r8,mmu_hash_lock@h
        ori     r8,r8,mmu_hash_lock@l
        lis     r0,0x0fff
        b       10f
11:     lwz     r6,0(r8)
        cmpwi   0,r6,0
        bne     11b
10:     lwarx   r6,0,r8
        cmpwi   0,r6,0
        bne-    11b
        stwcx.  r0,0,r8
        bne-    10b
        isync
        eieio
        li      r0,0
        stw     r0,0(r8)
        blr
#endif

/*
 * Load a PTE into the hash table, if possible.
 * The address is in r4, and r3 contains an access flag:
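For readers who do not read PowerPC assembly, a C-level sketch of what the hash_page_sync routine above does may help: spin until mmu_hash_lock looks free, claim it with the lwarx/stwcx. (load-reserve/store-conditional) pair, with isync acting as the acquire barrier, and then release it straight away with eieio and a store of zero. This is a sketch only, not kernel source; modelling the assembler's lock word as a C11 atomic_uint is an assumption of the illustration.

#include <stdatomic.h>

/* The assembler's lock word, modelled as a C11 atomic for this sketch. */
extern atomic_uint mmu_hash_lock;

void hash_page_sync(void)
{
        unsigned int token = 0x0fff0000;  /* lis r0,0x0fff: any non-zero "held" value */

        for (;;) {
                /* 11: lwz/cmpwi/bne - spin on plain loads while the lock is held */
                while (atomic_load_explicit(&mmu_hash_lock, memory_order_relaxed))
                        ;
                /* 10: lwarx/stwcx. - try to claim the lock atomically;
                 * memory_order_acquire stands in for the isync */
                unsigned int expected = 0;
                if (atomic_compare_exchange_weak_explicit(&mmu_hash_lock,
                                &expected, token,
                                memory_order_acquire, memory_order_relaxed))
                        break;
        }
        /* eieio; li r0,0; stw r0,0(r8) - drop the lock again immediately */
        atomic_store_explicit(&mmu_hash_lock, 0, memory_order_release);
}

Note that the routine deliberately releases the lock at once: it protects nothing itself, it only guarantees that any hash_page critical section that began before pte_free() has completed.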
@@ -44,6 +44,10 @@ int io_bat_index;
extern char etext[], _stext[];
#ifdef CONFIG_SMP
extern void hash_page_sync(void);
#endif
#ifdef HAVE_BATS
extern unsigned long v_mapped_by_bats(unsigned long va);
extern unsigned long p_mapped_by_bats(unsigned long pa);
@@ -109,11 +113,17 @@ struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
void pte_free_kernel(pte_t *pte)
{
#ifdef CONFIG_SMP
        hash_page_sync();
#endif
        free_page((unsigned long)pte);
}

void pte_free(struct page *pte)
{
#ifdef CONFIG_SMP
        hash_page_sync();
#endif
        __free_page(pte);
}