Commit 28d3b363 authored by Anton Blanchard

ppc64: add POWER4+ (GQ) support

parent 8139c761
...@@ -238,7 +238,7 @@ fix_alignment(struct pt_regs *regs) ...@@ -238,7 +238,7 @@ fix_alignment(struct pt_regs *regs)
dsisr = regs->dsisr; dsisr = regs->dsisr;
/* Power4 doesn't set DSISR for an alignment interrupt */ /* Power4 doesn't set DSISR for an alignment interrupt */
if (__is_processor(PV_POWER4)) if (__is_processor(PV_POWER4) || __is_processor(PV_POWER4p))
dsisr = make_dsisr( *((unsigned *)regs->nip) ); dsisr = make_dsisr( *((unsigned *)regs->nip) );
/* extract the operation and registers from the dsisr */ /* extract the operation and registers from the dsisr */
......
...@@ -139,7 +139,7 @@ htab_initialize(void) ...@@ -139,7 +139,7 @@ htab_initialize(void)
mask = pteg_count-1; mask = pteg_count-1;
/* XXX we currently map kernel text rw, should fix this */ /* XXX we currently map kernel text rw, should fix this */
if (__is_processor(PV_POWER4) && _naca->physicalMemorySize > 256*MB) { if (cpu_has_largepage() && _naca->physicalMemorySize > 256*MB) {
create_pte_mapping((unsigned long)KERNELBASE, create_pte_mapping((unsigned long)KERNELBASE,
KERNELBASE + 256*MB, mode_rw, mask, 0); KERNELBASE + 256*MB, mode_rw, mask, 0);
create_pte_mapping((unsigned long)KERNELBASE + 256*MB, create_pte_mapping((unsigned long)KERNELBASE + 256*MB,
...@@ -245,9 +245,8 @@ int __hash_page(unsigned long ea, unsigned long access, unsigned long vsid, ...@@ -245,9 +245,8 @@ int __hash_page(unsigned long ea, unsigned long access, unsigned long vsid,
#define PPC64_HWNOEXEC (1 << 2) #define PPC64_HWNOEXEC (1 << 2)
/* We do lazy icache flushing on POWER4 */ /* We do lazy icache flushing on cpus that support it */
if (unlikely(__is_processor(PV_POWER4) && if (unlikely(cpu_has_noexecute() && pfn_valid(pte_pfn(new_pte)))) {
pfn_valid(pte_pfn(new_pte)))) {
struct page *page = pte_page(new_pte); struct page *page = pte_page(new_pte);
/* page is dirty */ /* page is dirty */
......
...@@ -258,7 +258,7 @@ unsigned long iSeries_process_mainstore_vpd( struct MemoryBlock *mb_array, unsig ...@@ -258,7 +258,7 @@ unsigned long iSeries_process_mainstore_vpd( struct MemoryBlock *mb_array, unsig
{ {
unsigned long i; unsigned long i;
unsigned long mem_blocks = 0; unsigned long mem_blocks = 0;
if ( __is_processor( PV_POWER4 ) ) if (__is_processor(PV_POWER4) || __is_processor(PV_POWER4p))
mem_blocks = iSeries_process_Regatta_mainstore_vpd( mb_array, max_entries ); mem_blocks = iSeries_process_Regatta_mainstore_vpd( mb_array, max_entries );
else else
mem_blocks = iSeries_process_Condor_mainstore_vpd( mb_array, max_entries ); mem_blocks = iSeries_process_Condor_mainstore_vpd( mb_array, max_entries );
......
...@@ -357,7 +357,7 @@ static void pSeries_hpte_invalidate(unsigned long slot, unsigned long va, ...@@ -357,7 +357,7 @@ static void pSeries_hpte_invalidate(unsigned long slot, unsigned long va,
hptep->dw0.dword0 = 0; hptep->dw0.dword0 = 0;
/* Invalidate the tlb */ /* Invalidate the tlb */
if (!large && local && __is_processor(PV_POWER4)) { if (cpu_has_tlbiel() && !large && local) {
_tlbiel(va); _tlbiel(va);
} else { } else {
spin_lock_irqsave(&pSeries_tlbie_lock, flags); spin_lock_irqsave(&pSeries_tlbie_lock, flags);
...@@ -417,7 +417,7 @@ static void pSeries_flush_hash_range(unsigned long context, ...@@ -417,7 +417,7 @@ static void pSeries_flush_hash_range(unsigned long context,
hptep->dw0.dword0 = 0; hptep->dw0.dword0 = 0;
} }
if (!large && local && __is_processor(PV_POWER4)) { if (cpu_has_tlbiel() && !large && local) {
asm volatile("ptesync":::"memory"); asm volatile("ptesync":::"memory");
for (i = 0; i < j; i++) { for (i = 0; i < j; i++) {
......
...@@ -324,7 +324,7 @@ void initialize_paca_hardware_interrupt_stack(void) ...@@ -324,7 +324,7 @@ void initialize_paca_hardware_interrupt_stack(void)
* __get_free_pages() might give us a page > KERNBASE+256M which * __get_free_pages() might give us a page > KERNBASE+256M which
* is mapped with large ptes so we can't set up the guard page. * is mapped with large ptes so we can't set up the guard page.
*/ */
if (__is_processor(PV_POWER4)) if (cpu_has_largepage())
return; return;
for (i=0; i < NR_CPUS; i++) { for (i=0; i < NR_CPUS; i++) {
......
...@@ -300,6 +300,9 @@ static int show_cpuinfo(struct seq_file *m, void *v) ...@@ -300,6 +300,9 @@ static int show_cpuinfo(struct seq_file *m, void *v)
case PV_SSTAR: case PV_SSTAR:
seq_printf(m, "RS64-IV (sstar)\n"); seq_printf(m, "RS64-IV (sstar)\n");
break; break;
case PV_POWER4p:
seq_printf(m, "POWER4+ (gq)\n");
break;
case PV_630: case PV_630:
seq_printf(m, "POWER3 (630)\n"); seq_printf(m, "POWER3 (630)\n");
break; break;
......
...@@ -25,8 +25,6 @@ ...@@ -25,8 +25,6 @@
int make_ste(unsigned long stab, unsigned long esid, unsigned long vsid); int make_ste(unsigned long stab, unsigned long esid, unsigned long vsid);
void make_slbe(unsigned long esid, unsigned long vsid, int large); void make_slbe(unsigned long esid, unsigned long vsid, int large);
#define cpu_has_slb() (__is_processor(PV_POWER4))
/* /*
* Build an entry for the base kernel segment and put it into * Build an entry for the base kernel segment and put it into
* the segment table or SLB. All other segment table or SLB * the segment table or SLB. All other segment table or SLB
......
...@@ -172,7 +172,8 @@ MachineCheckException(struct pt_regs *regs) ...@@ -172,7 +172,8 @@ MachineCheckException(struct pt_regs *regs)
if (!user_mode(regs)) { if (!user_mode(regs)) {
/* Attempt to recover if the interrupt is recoverable */ /* Attempt to recover if the interrupt is recoverable */
if (regs->msr & MSR_RI) { if (regs->msr & MSR_RI) {
if (__is_processor(PV_POWER4) && if ((__is_processor(PV_POWER4) ||
__is_processor(PV_POWER4p)) &&
power4_handle_mce(regs)) power4_handle_mce(regs))
return; return;
} }
......
...@@ -57,6 +57,7 @@ ...@@ -57,6 +57,7 @@
#include <asm/tlb.h> #include <asm/tlb.h>
#include <asm/naca.h> #include <asm/naca.h>
#include <asm/eeh.h> #include <asm/eeh.h>
#include <asm/processor.h>
#include <asm/ppcdebug.h> #include <asm/ppcdebug.h>
...@@ -570,7 +571,7 @@ void flush_dcache_page(struct page *page) ...@@ -570,7 +571,7 @@ void flush_dcache_page(struct page *page)
void flush_icache_page(struct vm_area_struct *vma, struct page *page) void flush_icache_page(struct vm_area_struct *vma, struct page *page)
{ {
if (__is_processor(PV_POWER4)) if (cpu_has_noexecute())
return; return;
if ((vma->vm_flags & VM_EXEC) == 0) if ((vma->vm_flags & VM_EXEC) == 0)
...@@ -588,7 +589,7 @@ void clear_user_page(void *page, unsigned long vaddr, struct page *pg) ...@@ -588,7 +589,7 @@ void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
clear_page(page); clear_page(page);
/* XXX we shouldnt have to do this, but glibc requires it */ /* XXX we shouldnt have to do this, but glibc requires it */
if (__is_processor(PV_POWER4)) if (cpu_has_noexecute())
clear_bit(PG_arch_1, &pg->flags); clear_bit(PG_arch_1, &pg->flags);
else else
__flush_dcache_icache(page); __flush_dcache_icache(page);
...@@ -608,7 +609,7 @@ void copy_user_page(void *vto, void *vfrom, unsigned long vaddr, ...@@ -608,7 +609,7 @@ void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
return; return;
#endif #endif
if (__is_processor(PV_POWER4)) if (cpu_has_noexecute())
clear_bit(PG_arch_1, &pg->flags); clear_bit(PG_arch_1, &pg->flags);
else else
__flush_dcache_icache(vto); __flush_dcache_icache(vto);
......
...@@ -542,7 +542,7 @@ insert_bpts() ...@@ -542,7 +542,7 @@ insert_bpts()
} }
} }
if (!__is_processor(PV_POWER4)) { if (!__is_processor(PV_POWER4) && !__is_processor(PV_POWER4p)) {
if (dabr.enabled) if (dabr.enabled)
set_dabr(dabr.address); set_dabr(dabr.address);
if (iabr.enabled) if (iabr.enabled)
...@@ -559,7 +559,7 @@ remove_bpts() ...@@ -559,7 +559,7 @@ remove_bpts()
if (naca->platform != PLATFORM_PSERIES) if (naca->platform != PLATFORM_PSERIES)
return; return;
if (!__is_processor(PV_POWER4)) { if (!__is_processor(PV_POWER4) && !__is_processor(PV_POWER4p)) {
set_dabr(0); set_dabr(0);
set_iabr(0); set_iabr(0);
} }
...@@ -844,7 +844,7 @@ bpt_cmds(void) ...@@ -844,7 +844,7 @@ bpt_cmds(void)
cmd = inchar(); cmd = inchar();
switch (cmd) { switch (cmd) {
case 'd': /* bd - hardware data breakpoint */ case 'd': /* bd - hardware data breakpoint */
if (__is_processor(PV_POWER4)) { if (__is_processor(PV_POWER4) || __is_processor(PV_POWER4p)) {
printf("Not implemented on POWER4\n"); printf("Not implemented on POWER4\n");
break; break;
} }
...@@ -864,7 +864,7 @@ bpt_cmds(void) ...@@ -864,7 +864,7 @@ bpt_cmds(void)
dabr.address = (dabr.address & ~7) | mode; dabr.address = (dabr.address & ~7) | mode;
break; break;
case 'i': /* bi - hardware instr breakpoint */ case 'i': /* bi - hardware instr breakpoint */
if (__is_processor(PV_POWER4)) { if (__is_processor(PV_POWER4) || __is_processor(PV_POWER4p)) {
printf("Not implemented on POWER4\n"); printf("Not implemented on POWER4\n");
break; break;
} }
......
...@@ -488,6 +488,7 @@ ...@@ -488,6 +488,7 @@
#define PV_POWER4 0x0035 #define PV_POWER4 0x0035
#define PV_ICESTAR 0x0036 #define PV_ICESTAR 0x0036
#define PV_SSTAR 0x0037 #define PV_SSTAR 0x0037
#define PV_POWER4p 0x0038
#define PV_630 0x0040 #define PV_630 0x0040
#define PV_630p 0x0041 #define PV_630p 0x0041
...@@ -712,6 +713,18 @@ static inline void prefetchw(const void *x) ...@@ -712,6 +713,18 @@ static inline void prefetchw(const void *x)
#define spin_lock_prefetch(x) prefetchw(x) #define spin_lock_prefetch(x) prefetchw(x)
#define cpu_has_largepage() (__is_processor(PV_POWER4) || \
__is_processor(PV_POWER4p))
#define cpu_has_slb() (__is_processor(PV_POWER4) || \
__is_processor(PV_POWER4p))
#define cpu_has_tlbiel() (__is_processor(PV_POWER4) || \
__is_processor(PV_POWER4p))
#define cpu_has_noexecute() (__is_processor(PV_POWER4) || \
__is_processor(PV_POWER4p))
#endif /* ASSEMBLY */ #endif /* ASSEMBLY */
#endif /* __ASM_PPC64_PROCESSOR_H */ #endif /* __ASM_PPC64_PROCESSOR_H */
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment