Commit 4cc445b4 authored by Christophe Leroy's avatar Christophe Leroy Committed by Michael Ellerman

powerpc/32s: Use mmu_has_feature(MMU_FTR_HPTE_TABLE) instead of checking Hash var

We now have an early hash table on hash MMU, so there is no need to check
the Hash var to know if the Hash table is set up or not.

Use mmu_has_feature(MMU_FTR_HPTE_TABLE) instead. This will allow
optimisation via jump_label.
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/f1766631a9e014b6433f1a3c12c726ddfce34220.1603348103.git.christophe.leroy@csgroup.eu
parent 03d5b19c
...@@ -306,7 +306,7 @@ static void hash_preload(struct mm_struct *mm, unsigned long ea) ...@@ -306,7 +306,7 @@ static void hash_preload(struct mm_struct *mm, unsigned long ea)
{ {
pmd_t *pmd; pmd_t *pmd;
if (!Hash) if (!mmu_has_feature(MMU_FTR_HPTE_TABLE))
return; return;
pmd = pmd_off(mm, ea); pmd = pmd_off(mm, ea);
if (!pmd_none(*pmd)) if (!pmd_none(*pmd))
......
...@@ -36,7 +36,7 @@ void flush_hash_entry(struct mm_struct *mm, pte_t *ptep, unsigned long addr) ...@@ -36,7 +36,7 @@ void flush_hash_entry(struct mm_struct *mm, pte_t *ptep, unsigned long addr)
{ {
unsigned long ptephys; unsigned long ptephys;
if (Hash) { if (mmu_has_feature(MMU_FTR_HPTE_TABLE)) {
ptephys = __pa(ptep) & PAGE_MASK; ptephys = __pa(ptep) & PAGE_MASK;
flush_hash_pages(mm->context.id, addr, ptephys, 1); flush_hash_pages(mm->context.id, addr, ptephys, 1);
} }
...@@ -49,7 +49,7 @@ EXPORT_SYMBOL(flush_hash_entry); ...@@ -49,7 +49,7 @@ EXPORT_SYMBOL(flush_hash_entry);
*/ */
void tlb_flush(struct mmu_gather *tlb) void tlb_flush(struct mmu_gather *tlb)
{ {
if (!Hash) { if (!mmu_has_feature(MMU_FTR_HPTE_TABLE)) {
/* /*
* 603 needs to flush the whole TLB here since * 603 needs to flush the whole TLB here since
* it doesn't use a hash table. * it doesn't use a hash table.
...@@ -80,7 +80,7 @@ static void flush_range(struct mm_struct *mm, unsigned long start, ...@@ -80,7 +80,7 @@ static void flush_range(struct mm_struct *mm, unsigned long start,
unsigned int ctx = mm->context.id; unsigned int ctx = mm->context.id;
start &= PAGE_MASK; start &= PAGE_MASK;
if (!Hash) { if (!mmu_has_feature(MMU_FTR_HPTE_TABLE)) {
if (end - start <= PAGE_SIZE) if (end - start <= PAGE_SIZE)
_tlbie(start); _tlbie(start);
else else
...@@ -122,7 +122,7 @@ void flush_tlb_mm(struct mm_struct *mm) ...@@ -122,7 +122,7 @@ void flush_tlb_mm(struct mm_struct *mm)
{ {
struct vm_area_struct *mp; struct vm_area_struct *mp;
if (!Hash) { if (!mmu_has_feature(MMU_FTR_HPTE_TABLE)) {
_tlbia(); _tlbia();
return; return;
} }
...@@ -143,7 +143,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr) ...@@ -143,7 +143,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
struct mm_struct *mm; struct mm_struct *mm;
pmd_t *pmd; pmd_t *pmd;
if (!Hash) { if (!mmu_has_feature(MMU_FTR_HPTE_TABLE)) {
_tlbie(vmaddr); _tlbie(vmaddr);
return; return;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment