Commit 28d3b363 authored by Anton Blanchard

ppc64: add POWER4+ (GQ) support

parent 8139c761
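The conversion below hinges on the PVR (Processor Version Register) check behind __is_processor(): the upper halfword of the PVR names the processor family, so POWER4+ (GQ) reports 0x0038 where POWER4 reports 0x0035, and the new cpu_has_largepage()/cpu_has_slb()/cpu_has_tlbiel()/cpu_has_noexecute() macros simply accept either value. A minimal standalone sketch of that style of test, assuming __is_processor() compares the PVR version field against a PV_* constant; the fake_pvr value and helper names are illustrative, not the kernel's own:

#include <stdio.h>

#define PV_POWER4   0x0035      /* POWER4  (GP) */
#define PV_POWER4p  0x0038      /* POWER4+ (GQ) */

/* On real hardware the PVR would be read with mfspr; a made-up
 * POWER4+ value is used here so the sketch runs anywhere. */
static unsigned long fake_pvr = 0x00380201;

/* The processor family lives in the upper halfword of the PVR. */
static unsigned long pvr_version(void)
{
        return fake_pvr >> 16;
}

#define is_processor(pv)  (pvr_version() == (unsigned long)(pv))

/* Same shape as the cpu_has_*() feature tests added by this commit:
 * treat POWER4 and POWER4+ identically. */
#define has_tlbiel()  (is_processor(PV_POWER4) || is_processor(PV_POWER4p))

int main(void)
{
        printf("tlbiel available: %s\n", has_tlbiel() ? "yes" : "no");
        return 0;
}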
@@ -238,7 +238,7 @@ fix_alignment(struct pt_regs *regs)
 dsisr = regs->dsisr;
 /* Power4 doesn't set DSISR for an alignment interrupt */
-if (__is_processor(PV_POWER4))
+if (__is_processor(PV_POWER4) || __is_processor(PV_POWER4p))
 dsisr = make_dsisr( *((unsigned *)regs->nip) );
 /* extract the operation and registers from the dsisr */
@@ -139,7 +139,7 @@ htab_initialize(void)
 mask = pteg_count-1;
 /* XXX we currently map kernel text rw, should fix this */
-if (__is_processor(PV_POWER4) && _naca->physicalMemorySize > 256*MB) {
+if (cpu_has_largepage() && _naca->physicalMemorySize > 256*MB) {
 create_pte_mapping((unsigned long)KERNELBASE,
 KERNELBASE + 256*MB, mode_rw, mask, 0);
 create_pte_mapping((unsigned long)KERNELBASE + 256*MB,
@@ -245,9 +245,8 @@ int __hash_page(unsigned long ea, unsigned long access, unsigned long vsid,
 #define PPC64_HWNOEXEC (1 << 2)
-/* We do lazy icache flushing on POWER4 */
-if (unlikely(__is_processor(PV_POWER4) &&
-pfn_valid(pte_pfn(new_pte)))) {
+/* We do lazy icache flushing on cpus that support it */
+if (unlikely(cpu_has_noexecute() && pfn_valid(pte_pfn(new_pte)))) {
 struct page *page = pte_page(new_pte);
 /* page is dirty */
@@ -258,7 +258,7 @@ unsigned long iSeries_process_mainstore_vpd( struct MemoryBlock *mb_array, unsig
 {
 unsigned long i;
 unsigned long mem_blocks = 0;
-if ( __is_processor( PV_POWER4 ) )
+if (__is_processor(PV_POWER4) || __is_processor(PV_POWER4p))
 mem_blocks = iSeries_process_Regatta_mainstore_vpd( mb_array, max_entries );
 else
 mem_blocks = iSeries_process_Condor_mainstore_vpd( mb_array, max_entries );
@@ -357,7 +357,7 @@ static void pSeries_hpte_invalidate(unsigned long slot, unsigned long va,
 hptep->dw0.dword0 = 0;
 /* Invalidate the tlb */
-if (!large && local && __is_processor(PV_POWER4)) {
+if (cpu_has_tlbiel() && !large && local) {
 _tlbiel(va);
 } else {
 spin_lock_irqsave(&pSeries_tlbie_lock, flags);
@@ -417,7 +417,7 @@ static void pSeries_flush_hash_range(unsigned long context,
 hptep->dw0.dword0 = 0;
 }
-if (!large && local && __is_processor(PV_POWER4)) {
+if (cpu_has_tlbiel() && !large && local) {
 asm volatile("ptesync":::"memory");
 for (i = 0; i < j; i++) {
@@ -324,7 +324,7 @@ void initialize_paca_hardware_interrupt_stack(void)
 * __get_free_pages() might give us a page > KERNBASE+256M which
 * is mapped with large ptes so we can't set up the guard page.
 */
-if (__is_processor(PV_POWER4))
+if (cpu_has_largepage())
 return;
 for (i=0; i < NR_CPUS; i++) {
@@ -300,6 +300,9 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 case PV_SSTAR:
 seq_printf(m, "RS64-IV (sstar)\n");
 break;
+case PV_POWER4p:
+seq_printf(m, "POWER4+ (gq)\n");
+break;
 case PV_630:
 seq_printf(m, "POWER3 (630)\n");
 break;
@@ -25,8 +25,6 @@
 int make_ste(unsigned long stab, unsigned long esid, unsigned long vsid);
 void make_slbe(unsigned long esid, unsigned long vsid, int large);
-#define cpu_has_slb() (__is_processor(PV_POWER4))
 /*
 * Build an entry for the base kernel segment and put it into
 * the segment table or SLB. All other segment table or SLB
@@ -172,8 +172,9 @@ MachineCheckException(struct pt_regs *regs)
 if (!user_mode(regs)) {
 /* Attempt to recover if the interrupt is recoverable */
 if (regs->msr & MSR_RI) {
-if (__is_processor(PV_POWER4) &&
-power4_handle_mce(regs))
+if ((__is_processor(PV_POWER4) ||
+__is_processor(PV_POWER4p)) &&
+power4_handle_mce(regs))
 return;
 }
@@ -57,6 +57,7 @@
 #include <asm/tlb.h>
 #include <asm/naca.h>
 #include <asm/eeh.h>
+#include <asm/processor.h>
 #include <asm/ppcdebug.h>
@@ -570,7 +571,7 @@ void flush_dcache_page(struct page *page)
 void flush_icache_page(struct vm_area_struct *vma, struct page *page)
 {
-if (__is_processor(PV_POWER4))
+if (cpu_has_noexecute())
 return;
 if ((vma->vm_flags & VM_EXEC) == 0)
@@ -588,7 +589,7 @@ void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
 clear_page(page);
 /* XXX we shouldnt have to do this, but glibc requires it */
-if (__is_processor(PV_POWER4))
+if (cpu_has_noexecute())
 clear_bit(PG_arch_1, &pg->flags);
 else
 __flush_dcache_icache(page);
@@ -608,7 +609,7 @@ void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
 return;
 #endif
-if (__is_processor(PV_POWER4))
+if (cpu_has_noexecute())
 clear_bit(PG_arch_1, &pg->flags);
 else
 __flush_dcache_icache(vto);
@@ -542,7 +542,7 @@ insert_bpts()
 }
 }
-if (!__is_processor(PV_POWER4)) {
+if (!__is_processor(PV_POWER4) && !__is_processor(PV_POWER4p)) {
 if (dabr.enabled)
 set_dabr(dabr.address);
 if (iabr.enabled)
@@ -559,7 +559,7 @@ remove_bpts()
 if (naca->platform != PLATFORM_PSERIES)
 return;
-if (!__is_processor(PV_POWER4)) {
+if (!__is_processor(PV_POWER4) && !__is_processor(PV_POWER4p)) {
 set_dabr(0);
 set_iabr(0);
 }
@@ -844,7 +844,7 @@ bpt_cmds(void)
 cmd = inchar();
 switch (cmd) {
 case 'd': /* bd - hardware data breakpoint */
-if (__is_processor(PV_POWER4)) {
+if (__is_processor(PV_POWER4) || __is_processor(PV_POWER4p)) {
 printf("Not implemented on POWER4\n");
 break;
 }
@@ -864,7 +864,7 @@ bpt_cmds(void)
 dabr.address = (dabr.address & ~7) | mode;
 break;
 case 'i': /* bi - hardware instr breakpoint */
-if (__is_processor(PV_POWER4)) {
+if (__is_processor(PV_POWER4) || __is_processor(PV_POWER4p)) {
 printf("Not implemented on POWER4\n");
 break;
 }
@@ -488,6 +488,7 @@
 #define PV_POWER4 0x0035
 #define PV_ICESTAR 0x0036
 #define PV_SSTAR 0x0037
+#define PV_POWER4p 0x0038
 #define PV_630 0x0040
 #define PV_630p 0x0041
@@ -712,6 +713,18 @@ static inline void prefetchw(const void *x)
 #define spin_lock_prefetch(x) prefetchw(x)
+#define cpu_has_largepage() (__is_processor(PV_POWER4) || \
+__is_processor(PV_POWER4p))
+#define cpu_has_slb() (__is_processor(PV_POWER4) || \
+__is_processor(PV_POWER4p))
+#define cpu_has_tlbiel() (__is_processor(PV_POWER4) || \
+__is_processor(PV_POWER4p))
+#define cpu_has_noexecute() (__is_processor(PV_POWER4) || \
+__is_processor(PV_POWER4p))
 #endif /* ASSEMBLY */
 #endif /* __ASM_PPC64_PROCESSOR_H */