Commit 2c24cc13 authored by Andrew Morton, committed by Linus Torvalds

[PATCH] ppc64: preliminary iseries support, from Paul Mackerras

From: Anton Blanchard <anton@samba.org>

Preliminary iSeries support.  Still a bit hackish in parts but it does
compile.  The viodasd driver is almost completely untested so don't trust it
with your data.
parent 228ab0bd
@@ -10,21 +10,26 @@ obj-y := setup.o entry.o traps.o irq.o idle.o \
 	align.o semaphore.o bitops.o stab.o htab.o pacaData.o \
 	udbg.o binfmt_elf32.o sys_ppc32.o ioctl32.o \
 	ptrace32.o signal32.o pmc.o rtc.o init_task.o \
-	lmb.o pci.o pci_dn.o pci_dma.o cputable.o
+	lmb.o cputable.o
 
-obj-$(CONFIG_PPC_ISERIES) += iSeries_pci.o iSeries_pci_reset.o \
-	iSeries_IoMmTable.o iSeries_irq.o \
-	iSeries_VpdInfo.o XmPciLpEvent.o \
+obj-$(CONFIG_PCI) += pci.o pci_dn.o pci_dma.o
+
+ifdef CONFIG_PPC_ISERIES
+obj-$(CONFIG_PCI) += iSeries_pci.o iSeries_pci_reset.o \
+	iSeries_IoMmTable.o
+endif
+
+obj-$(CONFIG_PPC_ISERIES) += iSeries_irq.o \
+	iSeries_VpdInfo.o XmPciLpEvent.o \
 	HvCall.o HvLpConfig.o LparData.o mf_proc.o \
 	iSeries_setup.o ItLpQueue.o hvCall.o \
-	mf.o HvLpEvent.o iSeries_proc.o
+	mf.o HvLpEvent.o iSeries_proc.o iSeries_htab.o \
+	proc_pmc.o
 
 obj-$(CONFIG_PPC_PSERIES) += pSeries_pci.o pSeries_lpar.o pSeries_hvCall.o \
-	eeh.o nvram.o rtasd.o ras.o
-
-# Change this to pSeries only once we've got iSeries up to date
-obj-y += open_pic.o xics.o pSeries_htab.o rtas.o \
-	chrp_setup.o i8259.o prom.o
+	eeh.o nvram.o rtasd.o ras.o \
+	open_pic.o xics.o pSeries_htab.o rtas.o \
+	chrp_setup.o i8259.o prom.o
 
 obj-$(CONFIG_PROC_FS) += proc_ppc64.o
 obj-$(CONFIG_RTAS_FLASH) += rtas_flash.o
@@ -32,5 +37,6 @@ obj-$(CONFIG_SMP) += smp.o
 obj-$(CONFIG_MODULES) += module.o ppc_ksyms.o
 obj-$(CONFIG_PPC_RTAS) += rtas-proc.o
 obj-$(CONFIG_SCANLOG) += scanlog.o
+obj-$(CONFIG_VIOPATH) += viopath.o
 
 CFLAGS_ioctl32.o += -Ifs/
@@ -65,8 +65,6 @@
 #include <asm/ppcdebug.h>
 #include <asm/cputable.h>
 
-extern volatile unsigned char *chrp_int_ack_special;
-
 void chrp_progress(char *, unsigned short);
 extern void openpic_init_IRQ(void);
......
@@ -275,15 +275,6 @@ _GLOBAL(_switch)
 	addi	r6,r4,-THREAD	/* Convert THREAD to 'current' */
 	std	r6,PACACURRENT(r13)	/* Set new 'current' */
 
-#ifdef CONFIG_PPC_ISERIES
-#error fixme
-	ld	r7,TI_FLAGS(r4)	/* Get run light flag */
-	mfspr	r9,CTRLF
-	srdi	r7,r7,1		/* Align to run light bit in CTRL reg */
-	insrdi	r9,r7,1,63	/* Insert run light into CTRL */
-	mtspr	CTRLT,r9
-#endif
-
 	ld	r1,KSP(r4)	/* Load new stack pointer */
 	ld	r6,_CCR(r1)
 	mtcrf	0xFF,r6
@@ -291,6 +282,15 @@ _GLOBAL(_switch)
 	REST_8GPRS(14, r1)
 	REST_10GPRS(22, r1)
 
+#ifdef CONFIG_PPC_ISERIES
+	clrrdi	r7,r1,THREAD_SHIFT	/* get current_thread_info() */
+	ld	r7,TI_FLAGS(r7)		/* Get run light flag */
+	mfspr	r9,CTRLF
+	srdi	r7,r7,TIF_RUN_LIGHT
+	insrdi	r9,r7,1,63		/* Insert run light into CTRL */
+	mtspr	CTRLT,r9
+#endif
+
 	/* convert old thread to its task_struct for return value */
 	addi	r3,r3,-THREAD
 	ld	r7,_NIP(r1)	/* Return to _switch caller in new task */
@@ -308,39 +308,16 @@ _GLOBAL(ret_from_fork)
 	b	.ret_from_except
 
 _GLOBAL(ret_from_except)
-#ifdef CONFIG_PPC_ISERIES
-	ld	r5,SOFTE(r1)
-	cmpdi	0,r5,0
-	beq	4f
-irq_recheck:
-	/* Check for pending interrupts (iSeries) */
-	CHECKANYINT(r3,r4)
-	beq+	4f		/* skip do_IRQ if no interrupts */
-#warning FIX ISERIES
-	mfspr	r5,SPRG3
-	li	r3,0
-	stb	r3,PACAPROCENABLED(r5)	/* ensure we are disabled */
-	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.do_IRQ
-	b	irq_recheck	/* loop back and handle more */
-4:
-#endif
 	/*
 	 * Disable interrupts so that current_thread_info()->flags
 	 * can't change between when we test it and when we return
 	 * from the interrupt.
 	 */
-recheck:
 	mfmsr	r10		/* Get current interrupt state */
 	li	r4,0
 	ori	r4,r4,MSR_EE
-	andc	r10,r10,r4	/* clear MSR_EE */
-	mtmsrd	r10,1		/* Update machine state */
-#ifdef CONFIG_PPC_ISERIES
-#error fix iSeries soft disable
-#endif
+	andc	r9,r10,r4	/* clear MSR_EE */
+	mtmsrd	r9,1		/* Update machine state */
 
 	ld	r3,_MSR(r1)	/* Returning to user mode? */
 	andi.	r3,r3,MSR_PR
@@ -364,6 +341,28 @@ recheck:
 	REST_GPR(13,r1)
 
 restore:
+#ifdef CONFIG_PPC_ISERIES
+	ld	r5,SOFTE(r1)
+	mfspr	r4,SPRG3		/* get paca address */
+	cmpdi	0,r5,0
+	beq	4f
+
+	/* Check for pending interrupts (iSeries) */
+	/* this is CHECKANYINT except that we already have the paca address */
+	ld	r3,PACALPPACA+LPPACAANYINT(r4)
+	cmpdi	r3,0
+	beq+	4f		/* skip do_IRQ if no interrupts */
+
+	mfspr	r13,SPRG3	/* get paca pointer back */
+	li	r3,0
+	stb	r3,PACAPROCENABLED(r13)	/* ensure we are soft-disabled */
+	mtmsrd	r10		/* hard-enable again */
+	addi	r3,r1,STACK_FRAME_OVERHEAD
+	bl	.do_IRQ
+	b	.ret_from_except	/* loop back and handle more */
+4:	stb	r5,PACAPROCENABLED(r4)
+#endif
+
 	ld	r3,_CTR(r1)
 	ld	r0,_LINK(r1)
 	mtctr	r3
@@ -377,12 +376,6 @@ restore:
 	stdcx.	r0,0,r1		/* to clear the reservation */
 
-#ifdef DO_SOFT_DISABLE
-	/* XXX do this in do_work, r13 isnt valid here */
-	ld	r0,SOFTE(r1)
-	stb	r0,PACAPROCENABLED(r13)
-#endif
-
 	mfmsr	r0
 	li	r2, MSR_RI
 	andc	r0,r0,r2
@@ -407,21 +400,21 @@ restore:
 /* Note: this must change if we start using the TIF_NOTIFY_RESUME bit */
 do_work:
 	/* Enable interrupts */
-	ori	r10,r10,MSR_EE
 	mtmsrd	r10,1
 
 	andi.	r0,r3,_TIF_NEED_RESCHED
 	beq	1f
 	bl	.schedule
-	b	recheck
+	b	.ret_from_except
 
 1:	andi.	r0,r3,_TIF_SIGPENDING
-	beq	recheck
+	beq	.ret_from_except
 	li	r3,0
 	addi	r4,r1,STACK_FRAME_OVERHEAD
 	bl	.do_signal
-	b	recheck
+	b	.ret_from_except
 
+#ifdef CONFIG_PPC_PSERIES
 /*
  * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
  * called with the MMU off.
@@ -632,3 +625,4 @@ _GLOBAL(enter_prom)
 	mtlr	r0
 	blr	/* return to caller */
+#endif /* defined(CONFIG_PPC_PSERIES) */
@@ -91,20 +91,26 @@
 	.text
 	.globl	_stext
 _stext:
+#ifdef CONFIG_PPC_PSERIES
 _STATIC(__start)
 	b	.__start_initialization_pSeries
+#endif
 #ifdef CONFIG_PPC_ISERIES
-	/* At offset 0x20, there is a pointer to iSeries LPAR data.
-	 * This is required by the hypervisor */
+	/*
+	 * At offset 0x20, there is a pointer to iSeries LPAR data.
+	 * This is required by the hypervisor
+	 */
 	. = 0x20
 	.llong hvReleaseData-KERNELBASE
 
-	/* At offset 0x28 and 0x30 are offsets to the msChunks
+	/*
+	 * At offset 0x28 and 0x30 are offsets to the msChunks
 	 * array (used by the iSeries LPAR debugger to do translation
 	 * between physical addresses and absolute addresses) and
-	 * to the pidhash table (also used by the debugger) */
+	 * to the pidhash table (also used by the debugger)
+	 */
 	.llong msChunks-KERNELBASE
-	.llong pidhash-KERNELBASE
+	.llong 0	/* pidhash-KERNELBASE SFRXXX */
 
 	/* Offset 0x38 - Pointer to start of embedded System.map */
 	.globl	embedded_sysmap_start
@@ -114,7 +120,7 @@ embedded_sysmap_start:
 	.globl	embedded_sysmap_end
 embedded_sysmap_end:
 	.llong	0
-#endif
+#else
 
 /* Secondary processors spin on this value until it goes to 1. */
 	.globl	__secondary_hold_spinloop
@@ -164,6 +170,7 @@ _GLOBAL(__secondary_hold)
 	BUG_OPCODE
 #endif
 #endif
+#endif
 
 /*
  * The following macros define the code that appears as
@@ -245,6 +252,14 @@ _GLOBAL(__secondary_hold)
 	std	r22,EX_SRR0(r21);	/* save SRR0 in exc. frame	*/ \
 	ld	r23,LPPACA+LPPACASRR1(r20);	/* Get SRR1 from ItLpPaca */ \
 	std	r23,EX_SRR1(r21);	/* save SRR1 in exc. frame	*/ \
+									   \
+	mfspr	r23,DAR;		/* Save DAR in exc. frame	*/ \
+	std	r23,EX_DAR(r21);					   \
+	mfspr	r23,DSISR;		/* Save DSISR in exc. frame	*/ \
+	stw	r23,EX_DSISR(r21);					   \
+	mfspr	r23,SPRG2;		/* Save r20 in exc. frame	*/ \
+	std	r23,EX_R20(r21);					   \
+									   \
 	mfcr	r23;			/* save CR in r23		*/
 
/*
@@ -1114,7 +1129,6 @@ _GLOBAL(save_remaining_regs)
 	SET_REG_TO_CONST(r22, MSR_KERNEL)
 
 #ifdef DO_SOFT_DISABLE
-#warning FIX ISERIES
 	stb	r20,PACAPROCENABLED(r13)	/* possibly soft enable */
 	ori	r22,r22,MSR_EE		/* always hard enable */
 #else
@@ -1220,6 +1234,7 @@ _GLOBAL(__start_initialization_iSeries)
 	b	.start_here_common
 #endif
 
+#ifdef CONFIG_PPC_PSERIES
 _GLOBAL(__start_initialization_pSeries)
 	mr	r31,r3			/* save parameters */
 	mr	r30,r4
@@ -1329,6 +1344,7 @@ _STATIC(__after_prom_start)
 	sub	r5,r5,r27
 	bl	.copy_and_flush		/* copy the rest */
 	b	.start_here_pSeries
+#endif
 
/*
 * Copy routine used to copy the kernel to start at physical address 0
@@ -1595,6 +1611,7 @@ _GLOBAL(enable_32b_mode)
 	isync
 	blr
 
+#ifdef CONFIG_PPC_PSERIES
 /*
  * This is where the main kernel code starts.
  */
@@ -1730,6 +1747,7 @@ _STATIC(start_here_pSeries)
 	mtspr	SRR0,r3
 	mtspr	SRR1,r4
 	rfid
+#endif /* CONFIG_PPC_PSERIES */
 
 /* This is where all platforms converge execution */
 _STATIC(start_here_common)
@@ -1804,10 +1822,8 @@ _STATIC(start_here_common)
 	/* Load up the kernel context */
 5:
 #ifdef DO_SOFT_DISABLE
-#warning FIX ISERIES
-	mfspr	r4,SPRG3
 	li	r5,0
-	stb	r5,PACAPROCENABLED(r4)	/* Soft Disabled */
+	stb	r5,PACAPROCENABLED(r13)	/* Soft Disabled */
 	mfmsr	r5
 	ori	r5,r5,MSR_EE		/* Hard Enabled */
 	mtmsrd	r5
......
@@ -75,6 +75,7 @@ loop_forever(void)
 		;
 }
 
+#ifdef CONFIG_PPC_PSERIES
 static inline void
 create_pte_mapping(unsigned long start, unsigned long end,
 		   unsigned long mode, int large)
@@ -181,6 +182,7 @@ htab_initialize(void)
 }
 #undef KB
 #undef MB
+#endif
 
 /*
  * find_linux_pte returns the address of a linux pte for a given
......
#define PCIFR(...)
/************************************************************************/
/* This module supports the iSeries I/O Address translation mapping     */
/* Copyright (C) 20yy  <Allan H Trautman> <IBM Corp>                    */
......
/*
* iSeries hashtable management.
* Derived from pSeries_htab.c
*
* SMP scalability work:
* Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <asm/machdep.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/iSeries/HvCallHpt.h>
#include <asm/abs_addr.h>
#if 0
#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/cputable.h>
#endif
static long iSeries_hpte_insert(unsigned long hpte_group, unsigned long va,
unsigned long prpn, int secondary,
unsigned long hpteflags, int bolted, int large)
{
long slot;
HPTE lhpte;
/*
* The hypervisor tries both primary and secondary.
* If we are being called to insert in the secondary,
* it means we have already tried both primary and secondary,
* so we return failure immediately.
*/
if (secondary)
return -1;
slot = HvCallHpt_findValid(&lhpte, va >> PAGE_SHIFT);
if (lhpte.dw0.dw0.v)
panic("select_hpte_slot found entry already valid\n");
if (slot == -1) /* No available entry found in either group */
return -1;
if (slot < 0) { /* MSB set means secondary group */
secondary = 1;
slot &= 0x7fffffffffffffff;
}
lhpte.dw1.dword1 = 0;
lhpte.dw1.dw1.rpn = physRpn_to_absRpn(prpn);
lhpte.dw1.flags.flags = hpteflags;
lhpte.dw0.dword0 = 0;
lhpte.dw0.dw0.avpn = va >> 23;
lhpte.dw0.dw0.h = secondary;
lhpte.dw0.dw0.bolted = bolted;
lhpte.dw0.dw0.v = 1;
/* Now fill in the actual HPTE */
HvCallHpt_addValidate(slot, secondary, &lhpte);
return (secondary << 3) | (slot & 7);
}
static unsigned long iSeries_hpte_getword0(unsigned long slot)
{
unsigned long dword0;
HPTE hpte;
HvCallHpt_get(&hpte, slot);
dword0 = hpte.dw0.dword0;
return dword0;
}
static long iSeries_hpte_remove(unsigned long hpte_group)
{
unsigned long slot_offset;
int i;
HPTE lhpte;
/* Pick a random slot to start at */
slot_offset = mftb() & 0x7;
for (i = 0; i < HPTES_PER_GROUP; i++) {
lhpte.dw0.dword0 =
iSeries_hpte_getword0(hpte_group + slot_offset);
if (!lhpte.dw0.dw0.bolted) {
HvCallHpt_invalidateSetSwBitsGet(hpte_group +
slot_offset, 0, 0);
return i;
}
slot_offset++;
slot_offset &= 0x7;
}
return -1;
}
static long iSeries_hpte_updatepp(unsigned long slot, unsigned long newpp,
unsigned long va, int large, int local)
{
HPTE hpte;
unsigned long avpn = va >> 23;
HvCallHpt_get(&hpte, slot);
if ((hpte.dw0.dw0.avpn == avpn) && (hpte.dw0.dw0.v)) {
HvCallHpt_setPp(slot, newpp);
return 0;
}
return -1;
}
/*
* Functions used to find the PTE for a particular virtual address.
* Only used during boot when bolting pages.
*
* Input : vpn : virtual page number
* Output: PTE index within the page table of the entry
* -1 on failure
*/
static long iSeries_hpte_find(unsigned long vpn)
{
HPTE hpte;
long slot;
/*
* The HvCallHpt_findValid interface is as follows:
* 0xffffffffffffffff : No entry found.
* 0x00000000xxxxxxxx : Entry found in primary group, slot x
* 0x80000000xxxxxxxx : Entry found in secondary group, slot x
*/
slot = HvCallHpt_findValid(&hpte, vpn);
if (hpte.dw0.dw0.v) {
if (slot < 0) {
slot &= 0x7fffffffffffffff;
slot = -slot;
}
} else
slot = -1;
return slot;
}
/*
* Update the page protection bits. Intended to be used to create
* guard pages for kernel data structures on pages which are bolted
* in the HPT. Assumes pages being operated on will not be stolen.
* Does not work on large pages.
*
* No need to lock here because we should be the only user.
*/
static void iSeries_hpte_updateboltedpp(unsigned long newpp, unsigned long ea)
{
unsigned long vsid,va,vpn;
long slot;
vsid = get_kernel_vsid(ea);
va = (vsid << 28) | (ea & 0x0fffffff);
vpn = va >> PAGE_SHIFT;
slot = iSeries_hpte_find(vpn);
if (slot == -1)
panic("updateboltedpp: Could not find page to bolt\n");
HvCallHpt_setPp(slot, newpp);
}
static void iSeries_hpte_invalidate(unsigned long slot, unsigned long va,
int large, int local)
{
HPTE lhpte;
unsigned long avpn = va >> 23;
lhpte.dw0.dword0 = iSeries_hpte_getword0(slot);
if ((lhpte.dw0.dw0.avpn == avpn) && lhpte.dw0.dw0.v)
HvCallHpt_invalidateSetSwBitsGet(slot, 0, 0);
}
void hpte_init_iSeries(void)
{
ppc_md.hpte_invalidate = iSeries_hpte_invalidate;
ppc_md.hpte_updatepp = iSeries_hpte_updatepp;
ppc_md.hpte_updateboltedpp = iSeries_hpte_updateboltedpp;
ppc_md.hpte_insert = iSeries_hpte_insert;
ppc_md.hpte_remove = iSeries_hpte_remove;
}
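hpte_init_iSeries() only wires these routines into the machdep vector; nothing in this file calls them directly. A minimal sketch of how the rest of the kernel is expected to reach them (illustrative, not part of the patch; the helper name is made up, but the argument order follows iSeries_hpte_insert() above):

#include <asm/machdep.h>

/* Hypothetical caller: generic hash-table code dispatching through the
 * ppc_md vector that hpte_init_iSeries() filled in at boot. */
static long insert_bolted_page(unsigned long hpte_group, unsigned long va,
			       unsigned long prpn, unsigned long hpteflags)
{
	return ppc_md.hpte_insert(hpte_group, va, prpn,
				  0 /* try the primary group */, hpteflags,
				  1 /* bolted */, 0 /* not a large page */);
}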
@@ -40,19 +40,21 @@
 #include <asm/iSeries/iSeries_irq.h>
 #include <asm/iSeries/XmPciLpEvent.h>
 
+static unsigned int iSeries_startup_IRQ(unsigned int irq);
+static void iSeries_shutdown_IRQ(unsigned int irq);
+static void iSeries_enable_IRQ(unsigned int irq);
+static void iSeries_disable_IRQ(unsigned int irq);
+static void iSeries_end_IRQ(unsigned int irq);
+
-hw_irq_controller iSeries_IRQ_handler = {
-	"iSeries irq controller",
-	iSeries_startup_IRQ,	/* startup */
-	iSeries_shutdown_IRQ,	/* shutdown */
-	iSeries_enable_IRQ,	/* enable */
-	iSeries_disable_IRQ,	/* disable */
-	NULL,			/* ack */
-	iSeries_end_IRQ,	/* end */
-	NULL			/* set_affinity */
+static hw_irq_controller iSeries_IRQ_handler = {
+	.typename = "iSeries irq controller",
+	.startup = iSeries_startup_IRQ,
+	.shutdown = iSeries_shutdown_IRQ,
+	.enable = iSeries_enable_IRQ,
+	.disable = iSeries_disable_IRQ,
+	.end = iSeries_end_IRQ
 };
 
 struct iSeries_irqEntry {
 	u32 dsa;
 	struct iSeries_irqEntry* next;
@@ -65,73 +67,97 @@ struct iSeries_irqAnchor {
 	struct iSeries_irqEntry* head;
 };
 
-struct iSeries_irqAnchor iSeries_irqMap[NR_IRQS];
+static struct iSeries_irqAnchor iSeries_irqMap[NR_IRQS];
 
-void iSeries_init_irqMap(int irq);
+#if 0
+static void iSeries_init_irqMap(int irq);
+#endif
+
+void iSeries_init_irq_desc(irq_desc_t *desc)
+{
+	desc->handler = &iSeries_IRQ_handler;
+}
 
 /* This is called by init_IRQ.  set in ppc_md.init_IRQ by iSeries_setup.c */
 void __init iSeries_init_IRQ(void)
 {
+#if 0
 	int i;
+	irq_desc_t *desc;
 	for (i = 0; i < NR_IRQS; i++) {
-		irq_desc[i].handler = &iSeries_IRQ_handler;
-		irq_desc[i].status = 0;
-		irq_desc[i].status |= IRQ_DISABLED;
-		irq_desc[i].depth = 1;
+		desc = get_irq_desc(i);
+		desc->handler = &iSeries_IRQ_handler;
+		desc->status = 0;
+		desc->status |= IRQ_DISABLED;
+		desc->depth = 1;
 		iSeries_init_irqMap(i);
 	}
+#endif
 	/* Register PCI event handler and open an event path */
-	PPCDBG(PPCDBG_BUSWALK,"Register PCI event handler and open an event path\n");
+	PPCDBG(PPCDBG_BUSWALK,
+			"Register PCI event handler and open an event path\n");
 	XmPciLpEvent_init();
 	return;
 }
-/**********************************************************************
+#if 0
+/*
  * Called by iSeries_init_IRQ
  * Prevent IRQs 0 and 255 from being used.  IRQ 0 appears in
  * uninitialized devices.  IRQ 255 appears in the PCI interrupt
  * line register if a PCI error occurs,
- *********************************************************************/
-void __init iSeries_init_irqMap(int irq)
+ */
+static void __init iSeries_init_irqMap(int irq)
 {
-	iSeries_irqMap[irq].valid = (irq == 0 || irq == 255)? 0 : 1;
+	iSeries_irqMap[irq].valid = ((irq == 0) || (irq == 255)) ? 0 : 1;
 	iSeries_irqMap[irq].entryCount = 0;
 	iSeries_irqMap[irq].head = NULL;
 }
+#endif
 
-/* This is called out of iSeries_scan_slot to allocate an IRQ for an EADS slot */
-/* It calculates the irq value for the slot. */
-int __init iSeries_allocate_IRQ(HvBusNumber busNumber, HvSubBusNumber subBusNumber, HvAgentId deviceId)
+/*
+ * This is called out of iSeries_scan_slot to allocate an IRQ for an EADS slot
+ * It calculates the irq value for the slot.
+ */
+int __init iSeries_allocate_IRQ(HvBusNumber busNumber,
+		HvSubBusNumber subBusNumber, HvAgentId deviceId)
 {
 	u8 idsel = (deviceId >> 4);
 	u8 function = deviceId & 0x0F;
-	int irq = ((((busNumber-1)*16 + (idsel-1)*8 + function)*9/8) % 253) + 2;
-	return irq;
+
+	return ((((busNumber - 1) * 16 + (idsel - 1) * 8
+		+ function) * 9 / 8) % 253) + 2;
 }
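A quick sanity check on the formula above (illustrative only, not part of the patch): the % 253 and + 2 keep every result in the range 2..254, so the reserved IRQs 0 and 255 can never come out of it.

#include <assert.h>

int main(void)
{
	/* bus 1, IDSEL 1, function 0: the lowest-numbered EADS slot */
	int bus = 1, idsel = 1, function = 0;
	int irq = ((((bus - 1) * 16 + (idsel - 1) * 8
		+ function) * 9 / 8) % 253) + 2;

	assert(irq == 2);	/* first usable IRQ; 0 and 255 stay reserved */
	return 0;
}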
-/* This is called out of iSeries_scan_slot to assign the EADS slot to its IRQ number */
-int __init iSeries_assign_IRQ(int irq, HvBusNumber busNumber, HvSubBusNumber subBusNumber, HvAgentId deviceId)
+/*
+ * This is called out of iSeries_scan_slot to assign the EADS slot
+ * to its IRQ number
+ */
+int __init iSeries_assign_IRQ(int irq, HvBusNumber busNumber,
+		HvSubBusNumber subBusNumber, HvAgentId deviceId)
 {
 	int rc;
 	u32 dsa = (busNumber << 16) | (subBusNumber << 8) | deviceId;
-	struct iSeries_irqEntry* newEntry;
+	struct iSeries_irqEntry *newEntry;
 	unsigned long flags;
+	irq_desc_t *desc = get_irq_desc(irq);
 
-	if (irq < 0 || irq >= NR_IRQS) {
+	if ((irq < 0) || (irq >= NR_IRQS))
 		return -1;
-	}
 	newEntry = kmalloc(sizeof(*newEntry), GFP_KERNEL);
-	if (newEntry == NULL) {
+	if (newEntry == NULL)
 		return -ENOMEM;
-	}
 	newEntry->dsa = dsa;
 	newEntry->next = NULL;
 
-	/********************************************************************
+	/*
 	 * Probably not necessary to lock the irq since allocation is only
 	 * done during buswalk, but it should not hurt anything except a
 	 * little performance to be smp safe.
-	 *******************************************************************/
-	spin_lock_irqsave(&irq_desc[irq].lock, flags);
+	 */
+	spin_lock_irqsave(&desc->lock, flags);
 	if (iSeries_irqMap[irq].valid) {
 		/* Push the new element onto the irq stack */
@@ -139,26 +165,28 @@ int __init iSeries_assign_IRQ(int irq, HvBusNumber busNumber, HvSubBusNumber sub
 		iSeries_irqMap[irq].head = newEntry;
 		++iSeries_irqMap[irq].entryCount;
 		rc = 0;
-		PPCDBG(PPCDBG_BUSWALK,"iSeries_assign_IRQ 0x%04X.%02X.%02X = 0x%04X\n",busNumber, subBusNumber, deviceId, irq);
-	}
-	else {
-		printk("PCI: Something is wrong with the iSeries_irqMap. \n");
+		PPCDBG(PPCDBG_BUSWALK, "iSeries_assign_IRQ 0x%04X.%02X.%02X = 0x%04X\n",
+				busNumber, subBusNumber, deviceId, irq);
+	} else {
+		printk("PCI: Something is wrong with the iSeries_irqMap.\n");
 		kfree(newEntry);
 		rc = -1;
 	}
-	spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
+	spin_unlock_irqrestore(&desc->lock, flags);
 	return rc;
 }
 /* This is called by iSeries_activate_IRQs */
-unsigned int iSeries_startup_IRQ(unsigned int irq)
+static unsigned int iSeries_startup_IRQ(unsigned int irq)
 {
-	struct iSeries_irqEntry* entry;
+	struct iSeries_irqEntry *entry;
 	u32 bus, subBus, deviceId, function, mask;
-	for(entry=iSeries_irqMap[irq].head; entry!=NULL; entry=entry->next) {
-		bus = (entry->dsa >> 16) & 0xFFFF;
-		subBus = (entry->dsa >> 8) & 0xFF;
+
+	for (entry = iSeries_irqMap[irq].head; entry != NULL;
+			entry = entry->next) {
+		bus = (entry->dsa >> 16) & 0xFFFF;
+		subBus = (entry->dsa >> 8) & 0xFF;
 		deviceId = entry->dsa & 0xFF;
 		function = deviceId & 0x0F;
 		/* Link the IRQ number to the bridge */
@@ -166,32 +194,41 @@ unsigned int iSeries_startup_IRQ(unsigned int irq)
 		/* Unmask bridge interrupts in the FISR */
 		mask = 0x01010000 << function;
 		HvCallPci_unmaskFisr(bus, subBus, deviceId, mask);
-		PPCDBG(PPCDBG_BUSWALK,"iSeries_activate_IRQ 0x%02X.%02X.%02X Irq:0x%02X\n",bus,subBus,deviceId,irq);
+		PPCDBG(PPCDBG_BUSWALK, "iSeries_activate_IRQ 0x%02X.%02X.%02X Irq:0x%02X\n",
+				bus, subBus, deviceId, irq);
 	}
 	return 0;
 }
 
-/* This is called out of iSeries_fixup to activate interrupt
- * generation for usable slots */
+/*
+ * This is called out of iSeries_fixup to activate interrupt
+ * generation for usable slots
+ */
 void __init iSeries_activate_IRQs()
 {
 	int irq;
 	unsigned long flags;
-	for (irq=0; irq < NR_IRQS; irq++) {
-		spin_lock_irqsave(&irq_desc[irq].lock, flags);
-		irq_desc[irq].handler->startup(irq);
-		spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
+
+	for (irq = 0; irq < NR_IRQS; irq++) {
+		irq_desc_t *desc = get_irq_desc(irq);
+
+		if (desc && desc->handler && desc->handler->startup) {
+			spin_lock_irqsave(&desc->lock, flags);
+			desc->handler->startup(irq);
+			spin_unlock_irqrestore(&desc->lock, flags);
+		}
 	}
 }
 /* this is not called anywhere currently */
-void iSeries_shutdown_IRQ(unsigned int irq) {
-	struct iSeries_irqEntry* entry;
+static void iSeries_shutdown_IRQ(unsigned int irq)
+{
+	struct iSeries_irqEntry *entry;
 	u32 bus, subBus, deviceId, function, mask;
 
 	/* irq should be locked by the caller */
-	for (entry=iSeries_irqMap[irq].head; entry; entry=entry->next) {
+	for (entry = iSeries_irqMap[irq].head; entry; entry = entry->next) {
 		bus = (entry->dsa >> 16) & 0xFFFF;
 		subBus = (entry->dsa >> 8) & 0xFF;
 		deviceId = entry->dsa & 0xFF;
@@ -202,57 +239,60 @@ void iSeries_shutdown_IRQ(unsigned int irq) {
 		mask = 0x01010000 << function;
 		HvCallPci_maskFisr(bus, subBus, deviceId, mask);
 	}
 }
 
-/***********************************************************
+/*
  * This will be called by device drivers (via disable_IRQ)
  * to disable INTA in the bridge interrupt status register.
- ***********************************************************/
-void iSeries_disable_IRQ(unsigned int irq)
+ */
+static void iSeries_disable_IRQ(unsigned int irq)
 {
-	struct iSeries_irqEntry* entry;
+	struct iSeries_irqEntry *entry;
 	u32 bus, subBus, deviceId, mask;
 
 	/* The IRQ has already been locked by the caller */
-	for (entry=iSeries_irqMap[irq].head; entry; entry=entry->next) {
+	for (entry = iSeries_irqMap[irq].head; entry; entry = entry->next) {
 		bus = (entry->dsa >> 16) & 0xFFFF;
 		subBus = (entry->dsa >> 8) & 0xFF;
 		deviceId = entry->dsa & 0xFF;
 		/* Mask secondary INTA */
 		mask = 0x80000000;
 		HvCallPci_maskInterrupts(bus, subBus, deviceId, mask);
-		PPCDBG(PPCDBG_BUSWALK,"iSeries_disable_IRQ 0x%02X.%02X.%02X 0x%04X\n",bus,subBus,deviceId,irq);
+		PPCDBG(PPCDBG_BUSWALK,
+				"iSeries_disable_IRQ 0x%02X.%02X.%02X 0x%04X\n",
+				bus, subBus, deviceId, irq);
 	}
 }
 
-/***********************************************************
+/*
  * This will be called by device drivers (via enable_IRQ)
  * to enable INTA in the bridge interrupt status register.
- ***********************************************************/
-void iSeries_enable_IRQ(unsigned int irq)
+ */
+static void iSeries_enable_IRQ(unsigned int irq)
 {
-	struct iSeries_irqEntry* entry;
+	struct iSeries_irqEntry *entry;
 	u32 bus, subBus, deviceId, mask;
 
 	/* The IRQ has already been locked by the caller */
-	for (entry=iSeries_irqMap[irq].head; entry; entry=entry->next) {
+	for (entry = iSeries_irqMap[irq].head; entry; entry = entry->next) {
 		bus = (entry->dsa >> 16) & 0xFFFF;
 		subBus = (entry->dsa >> 8) & 0xFF;
 		deviceId = entry->dsa & 0xFF;
 		/* Unmask secondary INTA */
 		mask = 0x80000000;
 		HvCallPci_unmaskInterrupts(bus, subBus, deviceId, mask);
-		PPCDBG(PPCDBG_BUSWALK,"iSeries_enable_IRQ 0x%02X.%02X.%02X 0x%04X\n",bus,subBus,deviceId,irq);
+		PPCDBG(PPCDBG_BUSWALK,
+				"iSeries_enable_IRQ 0x%02X.%02X.%02X 0x%04X\n",
+				bus, subBus, deviceId, irq);
 	}
 }
 
-/* Need to define this so ppc_irq_dispatch_handler will NOT call
-   enable_IRQ at the end of interrupt handling.  However, this
-   does nothing because there is not enough information provided
-   to do the EOI HvCall.  This is done by XmPciLpEvent.c */
-void iSeries_end_IRQ(unsigned int irq)
+/*
+ * Need to define this so ppc_irq_dispatch_handler will NOT call
+ * enable_IRQ at the end of interrupt handling.  However, this does
+ * nothing because there is not enough information provided to do
+ * the EOI HvCall.  This is done by XmPciLpEvent.c
+ */
+static void iSeries_end_IRQ(unsigned int irq)
 {
 }
#define PCIFR(...)
/*
 * iSeries_pci.c
 *
@@ -49,171 +50,160 @@
 #include "iSeries_IoMmTable.h"
 #include "pci.h"
 
-extern struct pci_controller* hose_head;
-extern struct pci_controller** hose_tail;
+extern struct pci_controller *hose_head;
+extern struct pci_controller **hose_tail;
 extern int global_phb_number;
 extern int panic_timeout;
 
 extern struct device_node *allnodes;
 extern unsigned long iSeries_Base_Io_Memory;
 
 extern struct pci_ops iSeries_pci_ops;
-extern struct flightRecorder* PciFr;
-extern struct TceTable* tceTables[256];
+extern struct flightRecorder *PciFr;
+extern struct TceTable *tceTables[256];
 
-/*******************************************************************
- * Counters and control flags.
- *******************************************************************/
-extern long Pci_Io_Read_Count;
-extern long Pci_Io_Write_Count;
-extern long Pci_Cfg_Read_Count;
-extern long Pci_Cfg_Write_Count;
-extern long Pci_Error_Count;
-
-extern int Pci_Retry_Max;
-extern int Pci_Error_Flag;
-extern int Pci_Trace_Flag;
 
 extern void iSeries_MmIoTest(void);
/*
/*******************************************************************
* Forward declares of prototypes. * Forward declares of prototypes.
*******************************************************************/ */
struct iSeries_Device_Node* find_Device_Node(struct pci_dev* PciDev); static struct iSeries_Device_Node *find_Device_Node(struct pci_dev *PciDev);
struct iSeries_Device_Node* get_Device_Node(struct pci_dev* PciDev);
unsigned long find_and_init_phbs(void); unsigned long find_and_init_phbs(void);
struct pci_controller* alloc_phb(struct device_node *dev, char *model, unsigned int addr_size_words) ; static void iSeries_Scan_PHBs_Slots(struct pci_controller *Phb);
static void iSeries_Scan_EADs_Bridge(HvBusNumber Bus, HvSubBusNumber SubBus,
void iSeries_Scan_PHBs_Slots(struct pci_controller* Phb); int IdSel);
void iSeries_Scan_EADs_Bridge(HvBusNumber Bus, HvSubBusNumber SubBus, int IdSel); static int iSeries_Scan_Bridge_Slot(HvBusNumber Bus,
int iSeries_Scan_Bridge_Slot(HvBusNumber Bus, struct HvCallPci_BridgeInfo* Info); struct HvCallPci_BridgeInfo *Info);
void list_device_nodes(void);
struct pci_dev;
LIST_HEAD(iSeries_Global_Device_List); LIST_HEAD(iSeries_Global_Device_List);
int DeviceCount = 0; static int DeviceCount;
/* Counters and control flags. */ /* Counters and control flags. */
static long Pci_Io_Read_Count = 0; static long Pci_Io_Read_Count;
static long Pci_Io_Write_Count = 0; static long Pci_Io_Write_Count;
static long Pci_Cfg_Read_Count = 0; #if 0
static long Pci_Cfg_Write_Count= 0; static long Pci_Cfg_Read_Count;
static long Pci_Error_Count = 0; static long Pci_Cfg_Write_Count;
#endif
static int Pci_Retry_Max = 3; /* Only retry 3 times */ static long Pci_Error_Count;
static int Pci_Error_Flag = 1; /* Set Retry Error on. */
static int Pci_Trace_Flag = 0; static int Pci_Retry_Max = 3; /* Only retry 3 times */
static int Pci_Error_Flag = 1; /* Set Retry Error on. */
static int Pci_Trace_Flag;
/********************************************************************************** /*
* Log Error infor in Flight Recorder to system Console. * Log Error infor in Flight Recorder to system Console.
* Filter out the device not there errors. * Filter out the device not there errors.
* PCI: EADs Connect Failed 0x18.58.10 Rc: 0x00xx * PCI: EADs Connect Failed 0x18.58.10 Rc: 0x00xx
* PCI: Read Vendor Failed 0x18.58.10 Rc: 0x00xx * PCI: Read Vendor Failed 0x18.58.10 Rc: 0x00xx
* PCI: Connect Bus Unit Failed 0x18.58.10 Rc: 0x00xx * PCI: Connect Bus Unit Failed 0x18.58.10 Rc: 0x00xx
**********************************************************************************/ */
void pci_Log_Error(char* Error_Text, int Bus, int SubBus, int AgentId, int HvRc) static void pci_Log_Error(char *Error_Text, int Bus, int SubBus,
int AgentId, int HvRc)
{ {
if( HvRc != 0x0302) { if (HvRc != 0x0302) {
char ErrorString[128]; char ErrorString[128];
sprintf(ErrorString,"%s Failed: 0x%02X.%02X.%02X Rc: 0x%04X",Error_Text,Bus,SubBus,AgentId,HvRc);
sprintf(ErrorString, "%s Failed: 0x%02X.%02X.%02X Rc: 0x%04X",
Error_Text, Bus, SubBus, AgentId, HvRc);
PCIFR(ErrorString); PCIFR(ErrorString);
printk("PCI: %s\n",ErrorString); printk("PCI: %s\n", ErrorString);
} }
} }
/********************************************************************************** #if 0
/*
* Dump the iSeries Temp Device Node * Dump the iSeries Temp Device Node
*<4>buswalk [swapper : - DeviceNode: 0xC000000000634300 * <4>buswalk [swapper : - DeviceNode: 0xC000000000634300
*<4>00. Device Node = 0xC000000000634300 * <4>00. Device Node = 0xC000000000634300
*<4> - PciDev = 0x0000000000000000 * <4> - PciDev = 0x0000000000000000
*<4> - tDevice = 0x 17:01.00 0x1022 00 * <4> - tDevice = 0x 17:01.00 0x1022 00
*<4> 4. Device Node = 0xC000000000634480 * <4> 4. Device Node = 0xC000000000634480
*<4> - PciDev = 0x0000000000000000 * <4> - PciDev = 0x0000000000000000
*<4> - Device = 0x 18:38.16 Irq:0xA7 Vendor:0x1014 Flags:0x00 * <4> - Device = 0x 18:38.16 Irq:0xA7 Vendor:0x1014 Flags:0x00
*<4> - Devfn = 0xB0: 22.18 * <4> - Devfn = 0xB0: 22.18
**********************************************************************************/ */
void dumpDevice_Node(struct iSeries_Device_Node* DevNode) void dumpDevice_Node(struct iSeries_Device_Node *DevNode)
{ {
udbg_printf("Device Node = 0x%p\n",DevNode); udbg_printf("Device Node = 0x%p\n", DevNode);
udbg_printf(" - PciDev = 0x%p\n",DevNode->PciDev); udbg_printf(" - PciDev = 0x%p\n", DevNode->PciDev);
udbg_printf(" - Device = 0x%4X:%02X.%02X (0x%02X)\n", udbg_printf(" - Device = 0x%4X:%02X.%02X (0x%02X)\n",
ISERIES_BUS(DevNode), ISERIES_BUS(DevNode), ISERIES_SUBBUS(DevNode),
ISERIES_SUBBUS(DevNode), DevNode->AgentId, DevNode->DevFn);
DevNode->AgentId, udbg_printf(" - LSlot = 0x%02X\n", DevNode->LogicalSlot);
DevNode->DevFn); udbg_printf(" - TceTable = 0x%p\n ", DevNode->DevTceTable);
udbg_printf(" - LSlot = 0x%02X\n",DevNode->LogicalSlot); udbg_printf(" - DSA = 0x%04X\n", ISERIES_DSA(DevNode) >> 32);
udbg_printf(" - TceTable = 0x%p\n ",DevNode->DevTceTable);
udbg_printf(" - DSA = 0x%04X\n",ISERIES_DSA(DevNode)>>32 );
udbg_printf(" = Irq:0x%02X Vendor:0x%04X Flags:0x%02X\n", udbg_printf(" = Irq:0x%02X Vendor:0x%04X Flags:0x%02X\n",
DevNode->Irq, DevNode->Irq, DevNode->Vendor, DevNode->Flags);
DevNode->Vendor, udbg_printf(" - Location = %s\n", DevNode->CardLocation);
DevNode->Flags );
udbg_printf(" - Location = %s\n",DevNode->CardLocation);
} }
/**********************************************************************************
/*
* Walk down the device node chain * Walk down the device node chain
**********************************************************************************/ */
void list_device_nodes(void) static void list_device_nodes(void)
{ {
struct list_head* Device_Node_Ptr = iSeries_Global_Device_List.next; struct list_head *Device_Node_Ptr = iSeries_Global_Device_List.next;
while(Device_Node_Ptr != &iSeries_Global_Device_List) {
dumpDevice_Node( (struct iSeries_Device_Node*)Device_Node_Ptr ); while (Device_Node_Ptr != &iSeries_Global_Device_List) {
dumpDevice_Node((struct iSeries_Device_Node*)Device_Node_Ptr);
Device_Node_Ptr = Device_Node_Ptr->next; Device_Node_Ptr = Device_Node_Ptr->next;
} }
} }
#endif
/*********************************************************************** /*
* build_device_node(u16 Bus, int SubBus, u8 DevFn) * build_device_node(u16 Bus, int SubBus, u8 DevFn)
* */
***********************************************************************/ static struct iSeries_Device_Node *build_device_node(HvBusNumber Bus,
struct iSeries_Device_Node* build_device_node(HvBusNumber Bus, HvSubBusNumber SubBus, int AgentId, int Function) HvSubBusNumber SubBus, int AgentId, int Function)
{ {
struct iSeries_Device_Node* DeviceNode; struct iSeries_Device_Node *DeviceNode;
PPCDBG(PPCDBG_BUSWALK,"-build_device_node 0x%02X.%02X.%02X Function: %02X\n",Bus,SubBus,AgentId, Function); PPCDBG(PPCDBG_BUSWALK,
"-build_device_node 0x%02X.%02X.%02X Function: %02X\n",
Bus, SubBus, AgentId, Function);
DeviceNode = kmalloc(sizeof(struct iSeries_Device_Node), GFP_KERNEL); DeviceNode = kmalloc(sizeof(struct iSeries_Device_Node), GFP_KERNEL);
if(DeviceNode == NULL) return NULL; if (DeviceNode == NULL)
return NULL;
memset(DeviceNode,0,sizeof(struct iSeries_Device_Node) );
list_add_tail(&DeviceNode->Device_List,&iSeries_Global_Device_List); memset(DeviceNode, 0, sizeof(struct iSeries_Device_Node));
/*DeviceNode->DsaAddr = ((u64)Bus<<48)+((u64)SubBus<<40)+((u64)0x10<<32); */ list_add_tail(&DeviceNode->Device_List, &iSeries_Global_Device_List);
ISERIES_BUS(DeviceNode) = Bus; /* DeviceNode->DsaAddr =
ISERIES_SUBBUS(DeviceNode) = SubBus; ((u64)Bus << 48) + ((u64)SubBus << 40) + ((u64)0x10 << 32); */
DeviceNode->DsaAddr.deviceId = 0x10; ISERIES_BUS(DeviceNode) = Bus;
DeviceNode->DsaAddr.barNumber = 0; ISERIES_SUBBUS(DeviceNode) = SubBus;
DeviceNode->AgentId = AgentId; DeviceNode->DsaAddr.deviceId = 0x10;
DeviceNode->DevFn = PCI_DEVFN(ISERIES_ENCODE_DEVICE(AgentId),Function ); DeviceNode->DsaAddr.barNumber = 0;
DeviceNode->IoRetry = 0; DeviceNode->AgentId = AgentId;
DeviceNode->DevFn = PCI_DEVFN(ISERIES_ENCODE_DEVICE(AgentId), Function);
DeviceNode->IoRetry = 0;
iSeries_Get_Location_Code(DeviceNode); iSeries_Get_Location_Code(DeviceNode);
PCIFR("Device 0x%02X.%2X, Node:0x%p ",ISERIES_BUS(DeviceNode),ISERIES_DEVFUN(DeviceNode),DeviceNode); PCIFR("Device 0x%02X.%2X, Node:0x%p ", ISERIES_BUS(DeviceNode),
ISERIES_DEVFUN(DeviceNode), DeviceNode);
return DeviceNode; return DeviceNode;
} }
/****************************************************************************
* /*
* Allocate pci_controller(phb) initialized common variables. * Allocate pci_controller(phb) initialized common variables.
* */
*****************************************************************************/ static struct pci_controller *pci_alloc_pci_controllerX(char *model,
struct pci_controller* pci_alloc_pci_controllerX(char *model, enum phb_types controller_type) enum phb_types controller_type)
{ {
struct pci_controller *hose; struct pci_controller *hose;
hose = (struct pci_controller*)kmalloc(sizeof(struct pci_controller), GFP_KERNEL);
if(hose == NULL) return NULL; hose = (struct pci_controller *)kmalloc(sizeof(struct pci_controller),
GFP_KERNEL);
if (hose == NULL)
return NULL;
memset(hose, 0, sizeof(struct pci_controller)); memset(hose, 0, sizeof(struct pci_controller));
if(strlen(model) < 8) strcpy(hose->what,model); if (strlen(model) < 8)
else memcpy(hose->what,model,7); strcpy(hose->what, model);
else
memcpy(hose->what, model, 7);
hose->type = controller_type; hose->type = controller_type;
hose->global_number = global_phb_number; hose->global_number = global_phb_number;
global_phb_number++; global_phb_number++;
...@@ -223,8 +213,7 @@ struct pci_controller* pci_alloc_pci_controllerX(char *model, enum phb_types con ...@@ -223,8 +213,7 @@ struct pci_controller* pci_alloc_pci_controllerX(char *model, enum phb_types con
return hose; return hose;
} }
/**************************************************************************** /*
*
* unsigned int __init find_and_init_phbs(void) * unsigned int __init find_and_init_phbs(void)
* *
* Description: * Description:
...@@ -232,363 +221,388 @@ struct pci_controller* pci_alloc_pci_controllerX(char *model, enum phb_types con ...@@ -232,363 +221,388 @@ struct pci_controller* pci_alloc_pci_controllerX(char *model, enum phb_types con
* PCI buses. The system hypervisor is queried as to the guest partition * PCI buses. The system hypervisor is queried as to the guest partition
* ownership status. A pci_controller is build for any bus which is partially * ownership status. A pci_controller is build for any bus which is partially
* owned or fully owned by this guest partition. * owned or fully owned by this guest partition.
****************************************************************************/ */
unsigned long __init find_and_init_phbs(void) unsigned long __init find_and_init_phbs(void)
{ {
struct pci_controller* phb; struct pci_controller *phb;
HvBusNumber BusNumber; HvBusNumber BusNumber;
PPCDBG(PPCDBG_BUSWALK,"find_and_init_phbs Entry\n"); PPCDBG(PPCDBG_BUSWALK, "find_and_init_phbs Entry\n");
/* Check all possible buses. */ /* Check all possible buses. */
for (BusNumber = 0; BusNumber < 256; BusNumber++) { for (BusNumber = 0; BusNumber < 256; BusNumber++) {
int RtnCode = HvCallXm_testBus(BusNumber); int RtnCode = HvCallXm_testBus(BusNumber);
if (RtnCode == 0) { if (RtnCode == 0) {
phb = pci_alloc_pci_controllerX("PHB HV", phb_type_hypervisor); phb = pci_alloc_pci_controllerX("PHB HV",
if(phb == NULL) { phb_type_hypervisor);
if (phb == NULL) {
printk("PCI: Allocate pci_controller failed.\n"); printk("PCI: Allocate pci_controller failed.\n");
PCIFR( "Allocate pci_controller failed."); PCIFR("Allocate pci_controller failed.");
return -1; return -1;
} }
phb->pci_mem_offset = phb->local_number = BusNumber; phb->pci_mem_offset = phb->local_number = BusNumber;
phb->first_busno = BusNumber; phb->first_busno = BusNumber;
phb->last_busno = BusNumber; phb->last_busno = BusNumber;
phb->ops = &iSeries_pci_ops; phb->ops = &iSeries_pci_ops;
PPCDBG(PPCDBG_BUSWALK, "PCI:Create iSeries pci_controller(%p), Bus: %04X\n",phb,BusNumber); PPCDBG(PPCDBG_BUSWALK, "PCI:Create iSeries pci_controller(%p), Bus: %04X\n",
PCIFR("Create iSeries PHB controller: %04X",BusNumber); phb, BusNumber);
PCIFR("Create iSeries PHB controller: %04X", BusNumber);
/***************************************************/ /* Find and connect the devices. */
/* Find and connect the devices. */
/***************************************************/
iSeries_Scan_PHBs_Slots(phb); iSeries_Scan_PHBs_Slots(phb);
} }
/* Check for Unexpected Return code, a clue that something */ /*
/* has gone wrong. */ * Check for Unexpected Return code, a clue that something
else if(RtnCode != 0x0301) { * has gone wrong.
PCIFR("Unexpected Return on Probe(0x%04X): 0x%04X",BusNumber,RtnCode); */
} else if (RtnCode != 0x0301)
PCIFR("Unexpected Return on Probe(0x%04X): 0x%04X",
BusNumber, RtnCode);
} }
return 0; return 0;
} }
/***********************************************************************
/*
* iSeries_pcibios_init * iSeries_pcibios_init
* *
* Chance to initialize and structures or variable before PCI Bus walk. * Chance to initialize and structures or variable before PCI Bus walk.
* *
*<4>buswalk [swapper : iSeries_pcibios_init Entry. * <4>buswalk [swapper : iSeries_pcibios_init Entry.
*<4>buswalk [swapper : IoMmTable Initialized 0xC00000000034BD30 * <4>buswalk [swapper : IoMmTable Initialized 0xC00000000034BD30
*<4>buswalk [swapper : find_and_init_phbs Entry * <4>buswalk [swapper : find_and_init_phbs Entry
*<4>buswalk [swapper : Create iSeries pci_controller:(0xC00000001F5C7000), Bus 0x0017 * <4>buswalk [swapper : Create iSeries pci_controller:(0xC00000001F5C7000), Bus 0x0017
*<4>buswalk [swapper : Connect EADs: 0x17.00.12 = 0x00 * <4>buswalk [swapper : Connect EADs: 0x17.00.12 = 0x00
*<4>buswalk [swapper : iSeries_assign_IRQ 0x0017.00.12 = 0x0091 * <4>buswalk [swapper : iSeries_assign_IRQ 0x0017.00.12 = 0x0091
*<4>buswalk [swapper : - allocate and assign IRQ 0x17.00.12 = 0x91 * <4>buswalk [swapper : - allocate and assign IRQ 0x17.00.12 = 0x91
*<4>buswalk [swapper : - FoundDevice: 0x17.28.10 = 0x12AE * <4>buswalk [swapper : - FoundDevice: 0x17.28.10 = 0x12AE
*<4>buswalk [swapper : - build_device_node 0x17.28.12 * <4>buswalk [swapper : - build_device_node 0x17.28.12
*<4>buswalk [swapper : iSeries_pcibios_init Exit. * <4>buswalk [swapper : iSeries_pcibios_init Exit.
***********************************************************************/ */
void iSeries_pcibios_init(void) void iSeries_pcibios_init(void)
{ {
PPCDBG(PPCDBG_BUSWALK,"iSeries_pcibios_init Entry.\n"); PPCDBG(PPCDBG_BUSWALK, "iSeries_pcibios_init Entry.\n");
iSeries_IoMmTable_Initialize(); iSeries_IoMmTable_Initialize();
find_and_init_phbs(); find_and_init_phbs();
/* pci_assign_all_busses = 0; SFRXXX*/
pci_assign_all_busses = 0; PPCDBG(PPCDBG_BUSWALK, "iSeries_pcibios_init Exit.\n");
PPCDBG(PPCDBG_BUSWALK,"iSeries_pcibios_init Exit.\n");
} }
/***********************************************************************
/*
* pcibios_final_fixup(void) * pcibios_final_fixup(void)
***********************************************************************/ */
void __init pcibios_final_fixup(void) void __init pcibios_final_fixup(void)
{ {
struct pci_dev* PciDev = NULL; struct pci_dev *PciDev = NULL;
struct iSeries_Device_Node* DeviceNode; struct iSeries_Device_Node *DeviceNode;
char Buffer[256]; char Buffer[256];
int DeviceCount = 0; int DeviceCount = 0;
PPCDBG(PPCDBG_BUSWALK, "iSeries_pcibios_fixup Entry.\n");
PPCDBG(PPCDBG_BUSWALK,"iSeries_pcibios_fixup Entry.\n");
/******************************************************/
/* Fix up at the device node and pci_dev relationship */ /* Fix up at the device node and pci_dev relationship */
/******************************************************/
mf_displaySrc(0xC9000100); mf_displaySrc(0xC9000100);
while ((PciDev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, PciDev)) != NULL) { while ((PciDev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, PciDev))
!= NULL) {
DeviceNode = find_Device_Node(PciDev); DeviceNode = find_Device_Node(PciDev);
if(DeviceNode != NULL) { if (DeviceNode != NULL) {
++DeviceCount; ++DeviceCount;
PciDev->sysdata = (void*)DeviceNode; PciDev->sysdata = (void *)DeviceNode;
DeviceNode->PciDev = PciDev; DeviceNode->PciDev = PciDev;
PPCDBG(PPCDBG_BUSWALK,
PPCDBG(PPCDBG_BUSWALK,"PciDev 0x%p <==> DevNode 0x%p\n",PciDev,DeviceNode ); "PciDev 0x%p <==> DevNode 0x%p\n",
PciDev, DeviceNode);
iSeries_allocateDeviceBars(PciDev); iSeries_allocateDeviceBars(PciDev);
iSeries_Device_Information(PciDev, Buffer,
iSeries_Device_Information(PciDev,Buffer, sizeof(Buffer) ); sizeof(Buffer));
printk("%d. %s\n",DeviceCount,Buffer); printk("%d. %s\n", DeviceCount, Buffer);
create_pci_bus_tce_table((unsigned long)DeviceNode); create_pci_bus_tce_table((unsigned long)DeviceNode);
} else { } else
printk("PCI: Device Tree not found for 0x%016lX\n",(unsigned long)PciDev); printk("PCI: Device Tree not found for 0x%016lX\n",
} (unsigned long)PciDev);
} }
iSeries_IoMmTable_Status(); iSeries_IoMmTable_Status();
iSeries_activate_IRQs(); iSeries_activate_IRQs();
mf_displaySrc(0xC9000200); mf_displaySrc(0xC9000200);
} }
void pcibios_fixup_bus(struct pci_bus* PciBus) void pcibios_fixup_bus(struct pci_bus *PciBus)
{ {
PPCDBG(PPCDBG_BUSWALK,"iSeries_pcibios_fixup_bus(0x%04X) Entry.\n",PciBus->number); PPCDBG(PPCDBG_BUSWALK, "iSeries_pcibios_fixup_bus(0x%04X) Entry.\n",
PciBus->number);
} }
/***********************************************************************
* pcibios_fixup_resources(struct pci_dev *dev)
*
***********************************************************************/
void pcibios_fixup_resources(struct pci_dev *PciDev) void pcibios_fixup_resources(struct pci_dev *PciDev)
{ {
PPCDBG(PPCDBG_BUSWALK,"pcibios_fixup_resources PciDev %p\n",PciDev); PPCDBG(PPCDBG_BUSWALK, "fixup_resources PciDev %p\n", PciDev);
} }
/*
/******************************************************************************** * Loop through each node function to find usable EADs bridges.
* Loop through each node function to find usable EADs bridges. */
*********************************************************************************/ static void iSeries_Scan_PHBs_Slots(struct pci_controller *Phb)
void iSeries_Scan_PHBs_Slots(struct pci_controller* Phb)
{ {
struct HvCallPci_DeviceInfo* DevInfo; struct HvCallPci_DeviceInfo *DevInfo;
HvBusNumber Bus = Phb->local_number; /* System Bus */ HvBusNumber Bus = Phb->local_number; /* System Bus */
HvSubBusNumber SubBus = 0; /* EADs is always 0. */ HvSubBusNumber SubBus = 0; /* EADs is always 0. */
int HvRc = 0; int HvRc = 0;
int IdSel = 1; int IdSel = 1;
int MaxAgents = 8; int MaxAgents = 8;
DevInfo = (struct HvCallPci_DeviceInfo*)kmalloc(sizeof(struct HvCallPci_DeviceInfo), GFP_KERNEL); DevInfo = (struct HvCallPci_DeviceInfo*)
if(DevInfo == NULL) return; kmalloc(sizeof(struct HvCallPci_DeviceInfo), GFP_KERNEL);
if (DevInfo == NULL)
return;
/******************************************************************************** /*
* Probe for EADs Bridges * Probe for EADs Bridges
********************************************************************************/ */
for (IdSel=1; IdSel < MaxAgents; ++IdSel) { for (IdSel=1; IdSel < MaxAgents; ++IdSel) {
HvRc = HvCallPci_getDeviceInfo(Bus, SubBus, IdSel,REALADDR(DevInfo), sizeof(struct HvCallPci_DeviceInfo)); HvRc = HvCallPci_getDeviceInfo(Bus, SubBus, IdSel,
REALADDR(DevInfo),
sizeof(struct HvCallPci_DeviceInfo));
if (HvRc == 0) { if (HvRc == 0) {
if(DevInfo->deviceType == HvCallPci_NodeDevice) { if (DevInfo->deviceType == HvCallPci_NodeDevice)
iSeries_Scan_EADs_Bridge(Bus, SubBus, IdSel); iSeries_Scan_EADs_Bridge(Bus, SubBus, IdSel);
} else
else printk("PCI: Invalid System Configuration(0x%02X.\n",DevInfo->deviceType); printk("PCI: Invalid System Configuration(0x%02X.\n",
DevInfo->deviceType);
} }
else pci_Log_Error("getDeviceInfo",Bus, SubBus, IdSel,HvRc); else
pci_Log_Error("getDeviceInfo",Bus, SubBus, IdSel,HvRc);
} }
kfree(DevInfo); kfree(DevInfo);
} }
/******************************************************************************** static void iSeries_Scan_EADs_Bridge(HvBusNumber Bus, HvSubBusNumber SubBus,
* int IdSel)
*********************************************************************************/
void iSeries_Scan_EADs_Bridge(HvBusNumber Bus, HvSubBusNumber SubBus, int IdSel)
{ {
struct HvCallPci_BridgeInfo* BridgeInfo; struct HvCallPci_BridgeInfo *BridgeInfo;
HvAgentId AgentId; HvAgentId AgentId;
int Function; int Function;
int HvRc; int HvRc;
BridgeInfo = (struct HvCallPci_BridgeInfo*)kmalloc(sizeof(struct HvCallPci_BridgeInfo), GFP_KERNEL); BridgeInfo = (struct HvCallPci_BridgeInfo *)
if(BridgeInfo == NULL) return; kmalloc(sizeof(struct HvCallPci_BridgeInfo), GFP_KERNEL);
if (BridgeInfo == NULL)
return;
/********************************************************************* /* Note: hvSubBus and irq is always be 0 at this level! */
* Note: hvSubBus and irq is always be 0 at this level! for (Function = 0; Function < 8; ++Function) {
*********************************************************************/
for (Function=0; Function < 8; ++Function) {
AgentId = ISERIES_PCI_AGENTID(IdSel, Function); AgentId = ISERIES_PCI_AGENTID(IdSel, Function);
HvRc = HvCallXm_connectBusUnit(Bus, SubBus, AgentId, 0); HvRc = HvCallXm_connectBusUnit(Bus, SubBus, AgentId, 0);
if (HvRc == 0) { if (HvRc == 0) {
/* Connect EADs: 0x18.00.12 = 0x00 */ /* Connect EADs: 0x18.00.12 = 0x00 */
PPCDBG(PPCDBG_BUSWALK,"PCI:Connect EADs: 0x%02X.%02X.%02X\n",Bus, SubBus, AgentId); PPCDBG(PPCDBG_BUSWALK,
PCIFR( "Connect EADs: 0x%02X.%02X.%02X", Bus, SubBus, AgentId); "PCI:Connect EADs: 0x%02X.%02X.%02X\n",
HvRc = HvCallPci_getBusUnitInfo(Bus, SubBus, AgentId, Bus, SubBus, AgentId);
REALADDR(BridgeInfo), sizeof(struct HvCallPci_BridgeInfo)); PCIFR("Connect EADs: 0x%02X.%02X.%02X",
Bus, SubBus, AgentId);
HvRc = HvCallPci_getBusUnitInfo(Bus, SubBus, AgentId,
REALADDR(BridgeInfo),
sizeof(struct HvCallPci_BridgeInfo));
if (HvRc == 0) { if (HvRc == 0) {
PPCDBG(PPCDBG_BUSWALK,"PCI: BridgeInfo, Type:0x%02X, SubBus:0x%02X, MaxAgents:0x%02X, MaxSubBus: 0x%02X, LSlot: 0x%02X\n", PPCDBG(PPCDBG_BUSWALK,
BridgeInfo->busUnitInfo.deviceType, "PCI: BridgeInfo, Type:0x%02X, SubBus:0x%02X, MaxAgents:0x%02X, MaxSubBus: 0x%02X, LSlot: 0x%02X\n",
BridgeInfo->subBusNumber, BridgeInfo->busUnitInfo.deviceType,
BridgeInfo->maxAgents, BridgeInfo->subBusNumber,
BridgeInfo->maxSubBusNumber, BridgeInfo->maxAgents,
BridgeInfo->logicalSlotNumber); BridgeInfo->maxSubBusNumber,
PCIFR( "BridgeInfo, Type:0x%02X, SubBus:0x%02X, MaxAgents:0x%02X, MaxSubBus: 0x%02X, LSlot: 0x%02X", BridgeInfo->logicalSlotNumber);
BridgeInfo->busUnitInfo.deviceType, PCIFR("BridgeInfo, Type:0x%02X, SubBus:0x%02X, MaxAgents:0x%02X, MaxSubBus: 0x%02X, LSlot: 0x%02X",
BridgeInfo->subBusNumber, BridgeInfo->busUnitInfo.deviceType,
BridgeInfo->maxAgents, BridgeInfo->subBusNumber,
BridgeInfo->maxSubBusNumber, BridgeInfo->maxAgents,
BridgeInfo->logicalSlotNumber); BridgeInfo->maxSubBusNumber,
BridgeInfo->logicalSlotNumber);
if (BridgeInfo->busUnitInfo.deviceType == HvCallPci_BridgeDevice) {
if (BridgeInfo->busUnitInfo.deviceType ==
HvCallPci_BridgeDevice) {
/* Scan_Bridge_Slot...: 0x18.00.12 */ /* Scan_Bridge_Slot...: 0x18.00.12 */
iSeries_Scan_Bridge_Slot(Bus,BridgeInfo); iSeries_Scan_Bridge_Slot(Bus,
} BridgeInfo);
else printk("PCI: Invalid Bridge Configuration(0x%02X)",BridgeInfo->busUnitInfo.deviceType); } else
printk("PCI: Invalid Bridge Configuration(0x%02X)",
BridgeInfo->busUnitInfo.deviceType);
} }
} }
else if(HvRc != 0x000B) pci_Log_Error("EADs Connect",Bus,SubBus,AgentId,HvRc); else if (HvRc != 0x000B)
pci_Log_Error("EADs Connect",
Bus, SubBus, AgentId, HvRc);
} }
kfree(BridgeInfo); kfree(BridgeInfo);
} }
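The scan above probes all eight PCI functions behind an EADs IdSel, asks the hypervisor for bus-unit information, and only recurses into iSeries_Scan_Bridge_Slot() when the unit reports itself as a bridge. A rough standalone sketch of that shape follows; the helper names, the AgentId packing and the bridge_info fields are stand-ins for illustration, not the real HvCallXm/HvCallPci interfaces:

    #include <stdio.h>

    struct bridge_info { int is_bridge; int sub_bus; };

    /* Stand-in for HvCallXm_connectBusUnit(): pretend something answered. */
    static int connect_unit(int bus, int subbus, int agent)
    {
        (void)bus; (void)subbus; (void)agent;
        return 0;
    }

    /* Stand-in for HvCallPci_getBusUnitInfo(): pretend function 0 is a bridge. */
    static int get_bridge_info(int bus, int subbus, int agent,
                               struct bridge_info *bi)
    {
        (void)bus;
        bi->is_bridge = ((agent & 7) == 0);
        bi->sub_bus = subbus + 1;
        return 0;
    }

    static void scan_bridge_slot(int bus, const struct bridge_info *bi)
    {
        printf("scan slot on bus %d, sub-bus %d\n", bus, bi->sub_bus);
    }

    /* Probe all eight functions of one EADs IdSel; recurse when a bridge answers. */
    static void scan_eads_bridge(int bus, int subbus, int idsel)
    {
        for (int fn = 0; fn < 8; ++fn) {
            int agent = (idsel << 4) | fn;   /* assumed AgentId packing */
            struct bridge_info bi;

            if (connect_unit(bus, subbus, agent) != 0)
                continue;                    /* nothing behind this function */
            if (get_bridge_info(bus, subbus, agent, &bi) == 0 && bi.is_bridge)
                scan_bridge_slot(bus, &bi);
        }
    }

    int main(void) { scan_eads_bridge(0x18, 0, 1); return 0; }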
/******************************************************************************** /*
* * This assumes that the node slot is always on the primary bus!
* This assumes that the node slot is always on the primary bus! */
* static int iSeries_Scan_Bridge_Slot(HvBusNumber Bus,
*********************************************************************************/ struct HvCallPci_BridgeInfo *BridgeInfo)
int iSeries_Scan_Bridge_Slot(HvBusNumber Bus, struct HvCallPci_BridgeInfo* BridgeInfo)
{ {
struct iSeries_Device_Node* DeviceNode; struct iSeries_Device_Node *DeviceNode;
HvSubBusNumber SubBus = BridgeInfo->subBusNumber; HvSubBusNumber SubBus = BridgeInfo->subBusNumber;
u16 VendorId = 0; u16 VendorId = 0;
int HvRc = 0; int HvRc = 0;
u8 Irq = 0; u8 Irq = 0;
int IdSel = ISERIES_GET_DEVICE_FROM_SUBBUS(SubBus); int IdSel = ISERIES_GET_DEVICE_FROM_SUBBUS(SubBus);
int Function = ISERIES_GET_FUNCTION_FROM_SUBBUS(SubBus); int Function = ISERIES_GET_FUNCTION_FROM_SUBBUS(SubBus);
HvAgentId AgentId = ISERIES_PCI_AGENTID(IdSel, Function); HvAgentId AgentId = ISERIES_PCI_AGENTID(IdSel, Function);
HvAgentId EADsIdSel = ISERIES_PCI_AGENTID(IdSel, Function); HvAgentId EADsIdSel = ISERIES_PCI_AGENTID(IdSel, Function);
int FirstSlotId = 0; int FirstSlotId = 0;
/**********************************************************/ /* iSeries_allocate_IRQ.: 0x18.00.12(0xA3) */
/* iSeries_allocate_IRQ.: 0x18.00.12(0xA3) */ Irq = iSeries_allocate_IRQ(Bus, 0, AgentId);
/**********************************************************/
Irq = iSeries_allocate_IRQ(Bus, 0, AgentId);
iSeries_assign_IRQ(Irq, Bus, 0, AgentId); iSeries_assign_IRQ(Irq, Bus, 0, AgentId);
PPCDBG(PPCDBG_BUSWALK,"PCI:- allocate and assign IRQ 0x%02X.%02X.%02X = 0x%02X\n",Bus, 0, AgentId, Irq ); PPCDBG(PPCDBG_BUSWALK,
"PCI:- allocate and assign IRQ 0x%02X.%02X.%02X = 0x%02X\n",
Bus, 0, AgentId, Irq);
/**************************************************************************** /*
* Connect all functions of any device found. * Connect all functions of any device found.
****************************************************************************/ */
for (IdSel = 1; IdSel <= BridgeInfo->maxAgents; ++IdSel) { for (IdSel = 1; IdSel <= BridgeInfo->maxAgents; ++IdSel) {
for (Function = 0; Function < 8; ++Function) { for (Function = 0; Function < 8; ++Function) {
AgentId = ISERIES_PCI_AGENTID(IdSel, Function); AgentId = ISERIES_PCI_AGENTID(IdSel, Function);
HvRc = HvCallXm_connectBusUnit(Bus, SubBus, AgentId, Irq); HvRc = HvCallXm_connectBusUnit(Bus, SubBus,
if( HvRc == 0) { AgentId, Irq);
HvRc = HvCallPci_configLoad16(Bus, SubBus, AgentId, PCI_VENDOR_ID, &VendorId); if (HvRc == 0) {
if( HvRc == 0) { HvRc = HvCallPci_configLoad16(Bus, SubBus,
/**********************************************************/ AgentId, PCI_VENDOR_ID,
/* FoundDevice: 0x18.28.10 = 0x12AE */ &VendorId);
/**********************************************************/ if (HvRc == 0) {
PPCDBG(PPCDBG_BUSWALK,"PCI:- FoundDevice: 0x%02X.%02X.%02X = 0x%04X\n", /* FoundDevice: 0x18.28.10 = 0x12AE */
Bus, SubBus, AgentId, VendorId); PPCDBG(PPCDBG_BUSWALK,
"PCI:- FoundDevice: 0x%02X.%02X.%02X = 0x%04X\n",
HvRc = HvCallPci_configStore8(Bus, SubBus, AgentId, PCI_INTERRUPT_LINE, Irq); Bus, SubBus, AgentId, VendorId);
if( HvRc != 0) { HvRc = HvCallPci_configStore8(Bus,
pci_Log_Error("PciCfgStore Irq Failed!",Bus,SubBus,AgentId,HvRc); SubBus, AgentId,
} PCI_INTERRUPT_LINE, Irq);
if (HvRc != 0)
pci_Log_Error("PciCfgStore Irq Failed!",
Bus, SubBus,
AgentId, HvRc);
++DeviceCount; ++DeviceCount;
DeviceNode = build_device_node(Bus, SubBus, EADsIdSel, Function); DeviceNode = build_device_node(Bus,
DeviceNode->Vendor = VendorId; SubBus, EADsIdSel,
DeviceNode->Irq = Irq; Function);
DeviceNode->LogicalSlot = BridgeInfo->logicalSlotNumber; DeviceNode->Vendor = VendorId;
DeviceNode->Irq = Irq;
DeviceNode->LogicalSlot =
BridgeInfo->logicalSlotNumber;
PCIFR("Device(%4d): 0x%02X.%02X.%02X 0x%02X 0x%04X", PCIFR("Device(%4d): 0x%02X.%02X.%02X 0x%02X 0x%04X",
DeviceCount,Bus, SubBus, AgentId, DeviceCount, Bus, SubBus,
DeviceNode->LogicalSlot,DeviceNode->Vendor); AgentId,
DeviceNode->LogicalSlot,DeviceNode->Vendor);
/***********************************************************
* On the first device/function, assign irq to slot /*
***********************************************************/ * On the first device/function,
if(Function == 0) { * assign irq to slot
*/
if (Function == 0) {
FirstSlotId = AgentId; FirstSlotId = AgentId;
// AHT iSeries_assign_IRQ(Irq, Bus, SubBus, AgentId); /* AHT iSeries_assign_IRQ(Irq,
Bus, SubBus, AgentId); */
} }
} } else
else pci_Log_Error("Read Vendor",Bus,SubBus,AgentId,HvRc); pci_Log_Error("Read Vendor",
} Bus, SubBus, AgentId, HvRc);
else pci_Log_Error("Connect Bus Unit",Bus,SubBus, AgentId,HvRc); } else
pci_Log_Error("Connect Bus Unit",
Bus, SubBus, AgentId, HvRc);
} /* for (Function = 0; Function < 8; ++Function) */ } /* for (Function = 0; Function < 8; ++Function) */
} /* for (IdSel = 1; IdSel <= MaxAgents; ++IdSel) */ } /* for (IdSel = 1; IdSel <= MaxAgents; ++IdSel) */
return HvRc; return HvRc;
} }
/************************************************************************/
/* I/O Memory copy MUST use mmio commands on iSeries */ /*
/* To do: For performance, include the hv call directly */ * I/O Memory copy MUST use mmio commands on iSeries
/************************************************************************/ * To do: For performance, include the hv call directly
void* iSeries_memset_io(void* dest, char c, size_t Count) */
{ void *iSeries_memset_io(void *dest, char c, size_t Count)
u8 ByteValue = c; {
long NumberOfBytes = Count; u8 ByteValue = c;
char* IoBuffer = dest; long NumberOfBytes = Count;
while(NumberOfBytes > 0) { char *IoBuffer = dest;
iSeries_Write_Byte( ByteValue, (void*)IoBuffer );
while (NumberOfBytes > 0) {
iSeries_Write_Byte(ByteValue, (void *)IoBuffer);
++IoBuffer; ++IoBuffer;
-- NumberOfBytes; -- NumberOfBytes;
} }
return dest; return dest;
} }
void* iSeries_memcpy_toio(void *dest, void *source, size_t count)
{ void *iSeries_memcpy_toio(void *dest, void *source, size_t count)
char *dst = dest; {
char *src = source; char *dst = dest;
long NumberOfBytes = count; char *src = source;
while(NumberOfBytes > 0) { long NumberOfBytes = count;
iSeries_Write_Byte(*src++, (void*)dst++);
while (NumberOfBytes > 0) {
iSeries_Write_Byte(*src++, (void *)dst++);
-- NumberOfBytes; -- NumberOfBytes;
} }
return dest; return dest;
} }
void* iSeries_memcpy_fromio(void *dest, void *source, size_t count)
void *iSeries_memcpy_fromio(void *dest, void *source, size_t count)
{ {
char *dst = dest; char *dst = dest;
char *src = source; char *src = source;
long NumberOfBytes = count; long NumberOfBytes = count;
while(NumberOfBytes > 0) {
*dst++ = iSeries_Read_Byte( (void*)src++); while (NumberOfBytes > 0) {
*dst++ = iSeries_Read_Byte((void *)src++);
-- NumberOfBytes; -- NumberOfBytes;
} }
return dest; return dest;
} }
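Because iSeries has no direct load/store path to PCI memory, the copy and set helpers above funnel every byte through the MMIO accessors rather than using ordinary memcpy/memset. A minimal standalone sketch of the same pattern, with a stub in place of iSeries_Write_Byte():

    #include <stddef.h>
    #include <stdio.h>

    /* Stand-in for iSeries_Write_Byte(); here it just logs the access. */
    static void mmio_write_byte(unsigned char v, void *addr)
    {
        printf("write 0x%02x -> %p\n", v, addr);
    }

    /* Byte-at-a-time copy into "I/O" space, mirroring iSeries_memcpy_toio(). */
    static void *memcpy_toio_sketch(void *dest, const void *src, size_t n)
    {
        unsigned char *d = dest;
        const unsigned char *s = src;

        while (n-- > 0)
            mmio_write_byte(*s++, d++);
        return dest;
    }

    int main(void)
    {
        char io_window[4];      /* pretend this is a mapped BAR */
        memcpy_toio_sketch(io_window, "abcd", sizeof(io_window));
        return 0;
    }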
/**********************************************************************************
/*
* Look down the chain to find the matching device node * Look down the chain to find the matching device node
**********************************************************************************/ */
struct iSeries_Device_Node* find_Device_Node(struct pci_dev* PciDev) static struct iSeries_Device_Node *find_Device_Node(struct pci_dev *PciDev)
{ {
struct list_head* Device_Node_Ptr = iSeries_Global_Device_List.next; struct list_head *Device_Node_Ptr = iSeries_Global_Device_List.next;
int Bus = PciDev->bus->number; int Bus = PciDev->bus->number;
int DevFn = PciDev->devfn; int DevFn = PciDev->devfn;
while(Device_Node_Ptr != &iSeries_Global_Device_List) { while (Device_Node_Ptr != &iSeries_Global_Device_List) {
struct iSeries_Device_Node* DevNode = (struct iSeries_Device_Node*)Device_Node_Ptr; struct iSeries_Device_Node *DevNode =
if(Bus == ISERIES_BUS(DevNode) && DevFn == DevNode->DevFn) { (struct iSeries_Device_Node*)Device_Node_Ptr;
if ((Bus == ISERIES_BUS(DevNode)) && (DevFn == DevNode->DevFn))
return DevNode; return DevNode;
}
Device_Node_Ptr = Device_Node_Ptr->next; Device_Node_Ptr = Device_Node_Ptr->next;
} }
return NULL; return NULL;
} }
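find_Device_Node() is a plain linear walk of the global device list, matching on (bus, devfn). A self-contained sketch of the same lookup over a simplified node type:

    #include <stddef.h>
    #include <stdio.h>

    /* Simplified stand-in for struct iSeries_Device_Node on a singly linked list. */
    struct dev_node {
        struct dev_node *next;
        int bus, devfn;
    };

    /* Walk the list and return the node matching (bus, devfn), or NULL. */
    static struct dev_node *find_node(struct dev_node *head, int bus, int devfn)
    {
        for (struct dev_node *n = head; n; n = n->next)
            if (n->bus == bus && n->devfn == devfn)
                return n;
        return NULL;
    }

    int main(void)
    {
        struct dev_node b = { NULL, 0x18, 0x10 }, a = { &b, 0x17, 0x08 };
        printf("%s\n", find_node(&a, 0x18, 0x10) ? "found" : "missing");
        return 0;
    }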
/******************************************************************/
/* Returns the device node for the passed pci_dev */ #if 0
/* Sanity Check Node PciDev to passed pci_dev */ /*
/* If none is found, returns a NULL which the client must handle. */ * Returns the device node for the passed pci_dev
/******************************************************************/ * Sanity Check Node PciDev to passed pci_dev
struct iSeries_Device_Node* get_Device_Node(struct pci_dev* PciDev) * If none is found, returns a NULL which the client must handle.
*/
static struct iSeries_Device_Node *get_Device_Node(struct pci_dev *PciDev)
{ {
struct iSeries_Device_Node* Node; struct iSeries_Device_Node *Node;
Node = (struct iSeries_Device_Node*)PciDev->sysdata;
if(Node == NULL ) { Node = (struct iSeries_Device_Node *)PciDev->sysdata;
if (Node == NULL)
Node = find_Device_Node(PciDev); Node = find_Device_Node(PciDev);
} else if (Node->PciDev != PciDev)
else if(Node->PciDev != PciDev) {
Node = find_Device_Node(PciDev); Node = find_Device_Node(PciDev);
}
return Node; return Node;
} }
/********************************************************************************** #endif
*
/*
* Read PCI Config Space Code * Read PCI Config Space Code
* */
**********************************************************************************/ #if 0
/** BYTE *************************************************************************/ /** BYTE ********************************************************************/
int iSeries_Node_read_config_byte(struct iSeries_Device_Node* DevNode, int Offset, u8* ReadValue) int iSeries_Node_read_config_byte(struct iSeries_Device_Node* DevNode, int Offset, u8* ReadValue)
{ {
u8 ReadData; u8 ReadData;
...@@ -606,63 +620,25 @@ int iSeries_Node_read_config_byte(struct iSeries_Device_Node* DevNode, int Offse ...@@ -606,63 +620,25 @@ int iSeries_Node_read_config_byte(struct iSeries_Device_Node* DevNode, int Offse
*ReadValue = ReadData; *ReadValue = ReadData;
return DevNode->ReturnCode; return DevNode->ReturnCode;
} }
/** WORD *************************************************************************/
int iSeries_Node_read_config_word(struct iSeries_Device_Node* DevNode, int Offset, u16* ReadValue)
{
u16 ReadData;
if(DevNode == NULL) { return 0x301; }
++Pci_Cfg_Read_Count;
DevNode->ReturnCode = HvCallPci_configLoad16(ISERIES_BUS(DevNode),ISERIES_SUBBUS(DevNode),0x10,
Offset,&ReadData);
if(Pci_Trace_Flag == 1) {
PCIFR("RCW: 0x%04X.%02X 0x%04X = 0x%04X",ISERIES_BUS(DevNode),DevNode->DevFn,Offset,ReadData);
}
if(DevNode->ReturnCode != 0 ) {
printk("PCI: RCW: 0x%04X.%02X Error: 0x%04X\n",ISERIES_BUS(DevNode),DevNode->DevFn,DevNode->ReturnCode);
PCIFR( "RCW: 0x%04X.%02X Error: 0x%04X", ISERIES_BUS(DevNode),DevNode->DevFn,DevNode->ReturnCode);
}
*ReadValue = ReadData;
return DevNode->ReturnCode;
}
/** DWORD *************************************************************************/
int iSeries_Node_read_config_dword(struct iSeries_Device_Node* DevNode, int Offset, u32* ReadValue)
{
u32 ReadData;
if(DevNode == NULL) { return 0x301; }
++Pci_Cfg_Read_Count;
DevNode->ReturnCode = HvCallPci_configLoad32(ISERIES_BUS(DevNode),ISERIES_SUBBUS(DevNode),0x10,
Offset,&ReadData);
if(Pci_Trace_Flag == 1) {
PCIFR("RCL: 0x%04X.%02X 0x%04X = 0x%08X",ISERIES_BUS(DevNode),DevNode->DevFn,Offset,ReadData);
}
if(DevNode->ReturnCode != 0 ) {
printk("PCI: RCL: 0x%04X.%02X Error: 0x%04X\n",ISERIES_BUS(DevNode),DevNode->DevFn,DevNode->ReturnCode);
PCIFR( "RCL: 0x%04X.%02X Error: 0x%04X", ISERIES_BUS(DevNode),DevNode->DevFn,DevNode->ReturnCode);
}
*ReadValue = ReadData;
return DevNode->ReturnCode;
}
int iSeries_pci_read_config_byte(struct pci_dev* PciDev, int Offset, u8* ReadValue) { int iSeries_pci_read_config_byte(struct pci_dev* PciDev, int Offset, u8* ReadValue) {
struct iSeries_Device_Node* DevNode = get_Device_Node(PciDev); struct iSeries_Device_Node* DevNode = get_Device_Node(PciDev);
if(DevNode == NULL) return 0x0301; if(DevNode == NULL) return 0x0301;
return iSeries_Node_read_config_byte( DevNode ,Offset,ReadValue); return iSeries_Node_read_config_byte( DevNode ,Offset,ReadValue);
} }
int iSeries_pci_read_config_word(struct pci_dev* PciDev, int Offset, u16* ReadValue) { #endif
struct iSeries_Device_Node* DevNode = get_Device_Node(PciDev);
if(DevNode == NULL) return 0x0301; static int iSeries_pci_read_config(struct pci_bus *bus, unsigned int devfn,
return iSeries_Node_read_config_word( DevNode ,Offset,ReadValue ); int offset, int size, u32 *val)
} {
int iSeries_pci_read_config_dword(struct pci_dev* PciDev, int Offset, u32* ReadValue) { return PCIBIOS_DEVICE_NOT_FOUND;
struct iSeries_Device_Node* DevNode = get_Device_Node(PciDev);
if(DevNode == NULL) return 0x0301;
return iSeries_Node_read_config_dword(DevNode ,Offset,ReadValue );
} }
/**********************************************************************************/
/* */ /*
/* Write PCI Config Space */ * Write PCI Config Space
/* */ */
/** BYTE *************************************************************************/ #if 0
/** BYTE ********************************************************************/
int iSeries_Node_write_config_byte(struct iSeries_Device_Node* DevNode, int Offset, u8 WriteData) int iSeries_Node_write_config_byte(struct iSeries_Device_Node* DevNode, int Offset, u8 WriteData)
{ {
++Pci_Cfg_Write_Count; ++Pci_Cfg_Write_Count;
...@@ -677,228 +653,220 @@ int iSeries_Node_write_config_byte(struct iSeries_Device_Node* DevNode, int Offs ...@@ -677,228 +653,220 @@ int iSeries_Node_write_config_byte(struct iSeries_Device_Node* DevNode, int Offs
} }
return DevNode->ReturnCode; return DevNode->ReturnCode;
} }
/** WORD *************************************************************************/
int iSeries_Node_write_config_word(struct iSeries_Device_Node* DevNode, int Offset, u16 WriteData)
{
++Pci_Cfg_Write_Count;
DevNode->ReturnCode = HvCallPci_configStore16(ISERIES_BUS(DevNode),ISERIES_SUBBUS(DevNode),0x10,
Offset,WriteData);
if(Pci_Trace_Flag == 1) {
PCIFR("WCW: 0x%04X.%02X 0x%04X = 0x%04X",ISERIES_BUS(DevNode),DevNode->DevFn,Offset,WriteData);
}
if(DevNode->ReturnCode != 0 ) {
printk("PCI: WCW: 0x%04X.%02X Error: 0x%04X\n",ISERIES_BUS(DevNode),DevNode->DevFn,DevNode->ReturnCode);
PCIFR( "WCW: 0x%04X.%02X Error: 0x%04X", ISERIES_BUS(DevNode),DevNode->DevFn,DevNode->ReturnCode);
}
return DevNode->ReturnCode;
}
/** DWORD *************************************************************************/
int iSeries_Node_write_config_dword(struct iSeries_Device_Node* DevNode, int Offset, u32 WriteData)
{
++Pci_Cfg_Write_Count;
DevNode->ReturnCode = HvCallPci_configStore32(ISERIES_BUS(DevNode),ISERIES_SUBBUS(DevNode),0x10,
Offset,WriteData);
if(Pci_Trace_Flag == 1) {
PCIFR("WCL: 0x%04X.%02X 0x%04X = 0x%08X",ISERIES_BUS(DevNode),DevNode->DevFn,Offset,WriteData);
}
if(DevNode->ReturnCode != 0 ) {
printk("PCI: WCL: 0x%04X.%02X Error: 0x%04X\n",ISERIES_BUS(DevNode),DevNode->DevFn,DevNode->ReturnCode);
PCIFR( "WCL: 0x%04X.%02X Error: 0x%04X", ISERIES_BUS(DevNode),DevNode->DevFn,DevNode->ReturnCode);
}
return DevNode->ReturnCode;
}
int iSeries_pci_write_config_byte( struct pci_dev* PciDev,int Offset, u8 WriteValue) int iSeries_pci_write_config_byte( struct pci_dev* PciDev,int Offset, u8 WriteValue)
{ {
struct iSeries_Device_Node* DevNode = get_Device_Node(PciDev); struct iSeries_Device_Node* DevNode = get_Device_Node(PciDev);
if(DevNode == NULL) return 0x0301; if(DevNode == NULL) return 0x0301;
return iSeries_Node_write_config_byte( DevNode,Offset,WriteValue); return iSeries_Node_write_config_byte( DevNode,Offset,WriteValue);
} }
int iSeries_pci_write_config_word( struct pci_dev* PciDev,int Offset,u16 WriteValue) #endif
{
struct iSeries_Device_Node* DevNode = get_Device_Node(PciDev); static int iSeries_pci_write_config(struct pci_bus *bus, unsigned int devfn,
if(DevNode == NULL) return 0x0301; int offset, int size, u32 val)
return iSeries_Node_write_config_word( DevNode,Offset,WriteValue);
}
int iSeries_pci_write_config_dword(struct pci_dev* PciDev,int Offset,u32 WriteValue)
{ {
struct iSeries_Device_Node* DevNode = get_Device_Node(PciDev); return PCIBIOS_DEVICE_NOT_FOUND;
if(DevNode == NULL) return 0x0301;
return iSeries_Node_write_config_dword(DevNode,Offset,WriteValue);
} }
/************************************************************************/
/* Branch Table */
/************************************************************************/
struct pci_ops iSeries_pci_ops = { struct pci_ops iSeries_pci_ops = {
iSeries_pci_read_config_byte, .read = iSeries_pci_read_config,
iSeries_pci_read_config_word, .write = iSeries_pci_write_config
iSeries_pci_read_config_dword,
iSeries_pci_write_config_byte,
iSeries_pci_write_config_word,
iSeries_pci_write_config_dword
}; };
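The branch table now uses the two-method pci_ops style, where one read and one write entry point receive the access size as a parameter (here both are still stubs returning PCIBIOS_DEVICE_NOT_FOUND). As a sketch of how such an entry point typically dispatches on size, with local stand-ins for the per-width HvCallPci loads and for the return codes:

    #include <stdint.h>
    #include <stdio.h>

    #define OK        0    /* stand-in for PCIBIOS_SUCCESSFUL */
    #define NO_DEVICE 1    /* stand-in for PCIBIOS_DEVICE_NOT_FOUND */

    /* Stand-ins for the per-width config loads. */
    static int cfg_load8(int off, uint8_t *v)   { (void)off; *v = 0x12;       return 0; }
    static int cfg_load16(int off, uint16_t *v) { (void)off; *v = 0x1234;     return 0; }
    static int cfg_load32(int off, uint32_t *v) { (void)off; *v = 0x12345678; return 0; }

    /* One read entry point dispatching on the access size, as the
     * two-method pci_ops style expects. */
    static int read_config(int offset, int size, uint32_t *val)
    {
        uint8_t b; uint16_t w; uint32_t l;

        switch (size) {
        case 1: if (cfg_load8(offset, &b))  return NO_DEVICE; *val = b; break;
        case 2: if (cfg_load16(offset, &w)) return NO_DEVICE; *val = w; break;
        case 4: if (cfg_load32(offset, &l)) return NO_DEVICE; *val = l; break;
        default: return NO_DEVICE;
        }
        return OK;
    }

    int main(void)
    {
        uint32_t v;
        if (read_config(0x00, 2, &v) == OK)
            printf("vendor id 0x%04x\n", (unsigned int)v);
        return 0;
    }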
/************************************************************************ /*
* Check Return Code * Check Return Code
* -> On Failure, print and log information. * -> On Failure, print and log information.
* Increment Retry Count, if exceeds max, panic partition. * Increment Retry Count, if exceeds max, panic partition.
* -> If in retry, print and log success * -> If in retry, print and log success
************************************************************************ *
* PCI: Device 23.90 ReadL I/O Error( 0): 0x1234 * PCI: Device 23.90 ReadL I/O Error( 0): 0x1234
* PCI: Device 23.90 ReadL Retry( 1) * PCI: Device 23.90 ReadL Retry( 1)
* PCI: Device 23.90 ReadL Retry Successful(1) * PCI: Device 23.90 ReadL Retry Successful(1)
************************************************************************/ */
int CheckReturnCode(char* TextHdr, struct iSeries_Device_Node* DevNode, u64 RtnCode) int CheckReturnCode(char *TextHdr, struct iSeries_Device_Node *DevNode,
u64 RtnCode)
{ {
if(RtnCode != 0) { if (RtnCode != 0) {
++Pci_Error_Count; ++Pci_Error_Count;
++DevNode->IoRetry; ++DevNode->IoRetry;
PCIFR( "%s: Device 0x%04X:%02X I/O Error(%2d): 0x%04X", PCIFR("%s: Device 0x%04X:%02X I/O Error(%2d): 0x%04X",
TextHdr,ISERIES_BUS(DevNode),DevNode->DevFn,DevNode->IoRetry,(int)RtnCode); TextHdr, ISERIES_BUS(DevNode), DevNode->DevFn,
DevNode->IoRetry, (int)RtnCode);
printk("PCI: %s: Device 0x%04X:%02X I/O Error(%2d): 0x%04X\n", printk("PCI: %s: Device 0x%04X:%02X I/O Error(%2d): 0x%04X\n",
TextHdr,ISERIES_BUS(DevNode),DevNode->DevFn,DevNode->IoRetry,(int)RtnCode); TextHdr, ISERIES_BUS(DevNode), DevNode->DevFn,
/*******************************************************/ DevNode->IoRetry, (int)RtnCode);
/* Bump the retry and check for retry count exceeded. */ /*
/* If, Exceeded, panic the system. */ * Bump the retry and check for retry count exceeded.
/* If exceeded, panic the system. */ * If exceeded, panic the system.
if(DevNode->IoRetry > Pci_Retry_Max && Pci_Error_Flag > 0 ) { */
if ((DevNode->IoRetry > Pci_Retry_Max) &&
(Pci_Error_Flag > 0)) {
mf_displaySrc(0xB6000103); mf_displaySrc(0xB6000103);
panic_timeout = 0; panic_timeout = 0;
panic("PCI: Hardware I/O Error, SRC B6000103, Automatic Reboot Disabled.\n"); panic("PCI: Hardware I/O Error, SRC B6000103, "
"Automatic Reboot Disabled.\n");
} }
return -1; /* Retry Try */ return -1; /* Retry Try */
} }
/******************************************************************** /* If retry was in progress, log success and reset retry count */
* If retry was in progress, log success and reset retry count * if (DevNode->IoRetry > 0) {
*********************************************************************/
else if(DevNode->IoRetry > 0) {
PCIFR("%s: Device 0x%04X:%02X Retry Successful(%2d).", PCIFR("%s: Device 0x%04X:%02X Retry Successful(%2d).",
TextHdr,ISERIES_BUS(DevNode),DevNode->DevFn,DevNode->IoRetry); TextHdr, ISERIES_BUS(DevNode), DevNode->DevFn,
DevNode->IoRetry);
DevNode->IoRetry = 0; DevNode->IoRetry = 0;
return 0;
} }
return 0; return 0;
} }
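Every MMIO accessor below wraps its hypercall in a do/while loop that keeps retrying while CheckReturnCode() reports failure, and CheckReturnCode() itself gives up once the per-device retry count passes Pci_Retry_Max. A standalone sketch of that bounded-retry contract, with hypothetical names and a fake I/O routine that fails twice before succeeding:

    #include <stdio.h>
    #include <stdlib.h>

    #define RETRY_MAX 5

    /* Stand-in for one MMIO hypercall; fails twice, then succeeds. */
    static int do_io(void)
    {
        static int failures = 2;
        return failures-- > 0 ? -1 : 0;
    }

    /* Mirrors the CheckReturnCode() contract: a non-zero rc bumps the retry
     * counter and asks the caller to try again; too many failures is fatal. */
    static int check_rc(int rc, int *retries)
    {
        if (rc != 0) {
            if (++(*retries) > RETRY_MAX) {
                fprintf(stderr, "I/O error, giving up\n");
                exit(1);
            }
            return -1;      /* retry */
        }
        if (*retries)
            printf("retry successful after %d attempts\n", *retries);
        *retries = 0;
        return 0;
    }

    int main(void)
    {
        int retries = 0;
        do { } while (check_rc(do_io(), &retries) != 0);
        return 0;
    }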
/************************************************************************/
/* Translate the I/O Address into a device node, bar, and bar offset. */ /*
/* Note: Make sure the passed variables end up on the stack to avoid */ * Note: Make sure the passed variables end up on the stack to avoid
/* the exposure of being device global. */ * Note: Make sure the passed variable end up on the stack to avoid
/************************************************************************/ * the exposure of being device global.
static inline struct iSeries_Device_Node* xlateIoMmAddress(void* IoAddress, */
union HvDsaMap* DsaPtr, static inline struct iSeries_Device_Node *xlateIoMmAddress(void *IoAddress,
u64* BarOffsetPtr) { union HvDsaMap *DsaPtr, u64 *BarOffsetPtr)
{
unsigned long BaseIoAddr = (unsigned long)IoAddress-iSeries_Base_Io_Memory; unsigned long BaseIoAddr =
long TableIndex = BaseIoAddr/iSeries_IoMmTable_Entry_Size; (unsigned long)IoAddress - iSeries_Base_Io_Memory;
struct iSeries_Device_Node* DevNode = *(iSeries_IoMmTable +TableIndex); long TableIndex = BaseIoAddr / iSeries_IoMmTable_Entry_Size;
if(DevNode != NULL) { struct iSeries_Device_Node *DevNode = *(iSeries_IoMmTable + TableIndex);
DsaPtr->DsaAddr = ISERIES_DSA(DevNode);
DsaPtr->Dsa.barNumber = *(iSeries_IoBarTable+TableIndex); if (DevNode != NULL) {
*BarOffsetPtr = BaseIoAddr % iSeries_IoMmTable_Entry_Size; DsaPtr->DsaAddr = ISERIES_DSA(DevNode);
} DsaPtr->Dsa.barNumber = *(iSeries_IoBarTable + TableIndex);
else { *BarOffsetPtr = BaseIoAddr % iSeries_IoMmTable_Entry_Size;
} else
panic("PCI: Invalid PCI IoAddress detected!\n"); panic("PCI: Invalid PCI IoAddress detected!\n");
}
return DevNode; return DevNode;
} }
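The translation is purely table-driven: subtract the base of the I/O window, divide by the fixed per-entry size to index the device and BAR tables, and keep the remainder as the offset within the BAR. A small standalone sketch of that arithmetic; the window base, entry size and table contents are made-up values, not the real iSeries_IoMmTable globals:

    #include <stdint.h>
    #include <stdio.h>

    #define ENTRY_SIZE 0x10000UL       /* assumed per-entry window size */
    #define BASE_IO    0x80000000UL    /* assumed start of the I/O window */

    static const char *dev_table[] = { "devA", "devB", "devC" };
    static const int   bar_table[] = { 0, 1, 0 };

    /* Map a fake I/O address to (device, BAR, offset) the way
     * xlateIoMmAddress() indexes its parallel tables. */
    static void xlate(uintptr_t io_addr)
    {
        uintptr_t     rel   = io_addr - BASE_IO;
        unsigned long index = rel / ENTRY_SIZE;
        uintptr_t     off   = rel % ENTRY_SIZE;

        printf("%s BAR%d + 0x%lx\n", dev_table[index], bar_table[index],
               (unsigned long)off);
    }

    int main(void)
    {
        xlate(BASE_IO + 0x10004);      /* second entry, offset 4 */
        return 0;
    }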
/************************************************************************/ /*
/* Read MM I/O Instructions for the iSeries */ * Read MM I/O Instructions for the iSeries
/* On MM I/O error, all ones are returned and iSeries_pci_IoError is called */ * On MM I/O error, all ones are returned and iSeries_pci_IoError is called;
/* else, data is returned in big-endian format. */ * else, data is returned in big-endian format.
/************************************************************************/ *
/* iSeries_Read_Byte = Read Byte ( 8 bit) */ * iSeries_Read_Byte = Read Byte ( 8 bit)
/* iSeries_Read_Word = Read Word (16 bit) */ * iSeries_Read_Word = Read Word (16 bit)
/* iSeries_Read_Long = Read Long (32 bit) */ * iSeries_Read_Long = Read Long (32 bit)
/************************************************************************/ */
u8 iSeries_Read_Byte(void* IoAddress) u8 iSeries_Read_Byte(void *IoAddress)
{ {
u64 BarOffset; u64 BarOffset;
union HvDsaMap DsaData; union HvDsaMap DsaData;
struct HvCallPci_LoadReturn Return; struct HvCallPci_LoadReturn Return;
struct iSeries_Device_Node* DevNode = xlateIoMmAddress(IoAddress,&DsaData,&BarOffset); struct iSeries_Device_Node *DevNode =
xlateIoMmAddress(IoAddress, &DsaData, &BarOffset);
do { do {
++Pci_Io_Read_Count; ++Pci_Io_Read_Count;
HvCall3Ret16(HvCallPciBarLoad8, &Return, DsaData.DsaAddr,BarOffset, 0); HvCall3Ret16(HvCallPciBarLoad8, &Return, DsaData.DsaAddr,
} while (CheckReturnCode("RDB",DevNode, Return.rc) != 0); BarOffset, 0);
} while (CheckReturnCode("RDB", DevNode, Return.rc) != 0);
if(Pci_Trace_Flag == 1) PCIFR("RDB: IoAddress 0x%p = 0x%02X",IoAddress, (u8)Return.value); if (Pci_Trace_Flag == 1)
PCIFR("RDB: IoAddress 0x%p = 0x%02X", IoAddress,
(u8)Return.value);
return (u8)Return.value; return (u8)Return.value;
} }
u16 iSeries_Read_Word(void* IoAddress)
u16 iSeries_Read_Word(void *IoAddress)
{ {
u64 BarOffset; u64 BarOffset;
union HvDsaMap DsaData; union HvDsaMap DsaData;
struct HvCallPci_LoadReturn Return; struct HvCallPci_LoadReturn Return;
struct iSeries_Device_Node* DevNode = xlateIoMmAddress(IoAddress,&DsaData,&BarOffset); struct iSeries_Device_Node *DevNode =
xlateIoMmAddress(IoAddress, &DsaData, &BarOffset);
do { do {
++Pci_Io_Read_Count; ++Pci_Io_Read_Count;
HvCall3Ret16(HvCallPciBarLoad16,&Return, DsaData.DsaAddr,BarOffset, 0); HvCall3Ret16(HvCallPciBarLoad16, &Return, DsaData.DsaAddr,
} while (CheckReturnCode("RDW",DevNode, Return.rc) != 0); BarOffset, 0);
} while (CheckReturnCode("RDW", DevNode, Return.rc) != 0);
if(Pci_Trace_Flag == 1) PCIFR("RDW: IoAddress 0x%p = 0x%04X",IoAddress, swab16((u16)Return.value)); if (Pci_Trace_Flag == 1)
PCIFR("RDW: IoAddress 0x%p = 0x%04X", IoAddress,
swab16((u16)Return.value));
return swab16((u16)Return.value); return swab16((u16)Return.value);
} }
u32 iSeries_Read_Long(void* IoAddress)
u32 iSeries_Read_Long(void *IoAddress)
{ {
u64 BarOffset; u64 BarOffset;
union HvDsaMap DsaData; union HvDsaMap DsaData;
struct HvCallPci_LoadReturn Return; struct HvCallPci_LoadReturn Return;
struct iSeries_Device_Node* DevNode = xlateIoMmAddress(IoAddress,&DsaData,&BarOffset); struct iSeries_Device_Node *DevNode =
xlateIoMmAddress(IoAddress, &DsaData, &BarOffset);
do { do {
++Pci_Io_Read_Count; ++Pci_Io_Read_Count;
HvCall3Ret16(HvCallPciBarLoad32,&Return, DsaData.DsaAddr,BarOffset, 0); HvCall3Ret16(HvCallPciBarLoad32, &Return, DsaData.DsaAddr,
} while (CheckReturnCode("RDL",DevNode, Return.rc) != 0); BarOffset, 0);
} while (CheckReturnCode("RDL", DevNode, Return.rc) != 0);
if(Pci_Trace_Flag == 1) PCIFR("RDL: IoAddress 0x%p = 0x%04X",IoAddress, swab32((u32)Return.value)); if (Pci_Trace_Flag == 1)
PCIFR("RDL: IoAddress 0x%p = 0x%04X", IoAddress,
swab32((u32)Return.value));
return swab32((u32)Return.value); return swab32((u32)Return.value);
} }
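The word and long readers hand back swab16()/swab32() of the raw value returned by the hypercall, so callers see the data in the expected byte order. For reference, a standalone version of those two swaps:

    #include <stdint.h>
    #include <stdio.h>

    /* Same transforms as the kernel's swab16()/swab32() helpers. */
    static uint16_t swab16_sketch(uint16_t x)
    {
        return (uint16_t)((x >> 8) | (x << 8));
    }

    static uint32_t swab32_sketch(uint32_t x)
    {
        return  (x >> 24) |
               ((x >>  8) & 0x0000ff00u) |
               ((x <<  8) & 0x00ff0000u) |
                (x << 24);
    }

    int main(void)
    {
        printf("0x%04x -> 0x%04x\n", 0x1234, swab16_sketch(0x1234));
        printf("0x%08x -> 0x%08x\n", 0x12345678u, swab32_sketch(0x12345678u));
        return 0;
    }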
/************************************************************************/
/* Write MM I/O Instructions for the iSeries */ /*
/************************************************************************/ * Write MM I/O Instructions for the iSeries
/* iSeries_Write_Byte = Write Byte (8 bit) */ *
/* iSeries_Write_Word = Write Word(16 bit) */ * iSeries_Write_Byte = Write Byte (8 bit)
/* iSeries_Write_Long = Write Long(32 bit) */ * iSeries_Write_Word = Write Word(16 bit)
/************************************************************************/ * iSeries_Write_Long = Write Long(32 bit)
void iSeries_Write_Byte(u8 Data, void* IoAddress) */
{ void iSeries_Write_Byte(u8 Data, void *IoAddress)
u64 BarOffset; {
union HvDsaMap DsaData; u64 BarOffset;
union HvDsaMap DsaData;
struct HvCallPci_LoadReturn Return; struct HvCallPci_LoadReturn Return;
struct iSeries_Device_Node* DevNode = xlateIoMmAddress(IoAddress,&DsaData,&BarOffset); struct iSeries_Device_Node *DevNode =
xlateIoMmAddress(IoAddress, &DsaData, &BarOffset);
do { do {
++Pci_Io_Write_Count; ++Pci_Io_Write_Count;
Return.rc = HvCall4(HvCallPciBarStore8, DsaData.DsaAddr,BarOffset, Data, 0); Return.rc = HvCall4(HvCallPciBarStore8, DsaData.DsaAddr,
} while (CheckReturnCode("WWB",DevNode, Return.rc) != 0); BarOffset, Data, 0);
if(Pci_Trace_Flag == 1) PCIFR("WWB: IoAddress 0x%p = 0x%02X",IoAddress,Data); } while (CheckReturnCode("WWB", DevNode, Return.rc) != 0);
if (Pci_Trace_Flag == 1)
PCIFR("WWB: IoAddress 0x%p = 0x%02X", IoAddress, Data);
} }
void iSeries_Write_Word(u16 Data, void* IoAddress)
void iSeries_Write_Word(u16 Data, void *IoAddress)
{ {
u64 BarOffset; u64 BarOffset;
union HvDsaMap DsaData; union HvDsaMap DsaData;
struct HvCallPci_LoadReturn Return; struct HvCallPci_LoadReturn Return;
struct iSeries_Device_Node* DevNode = xlateIoMmAddress(IoAddress,&DsaData,&BarOffset); struct iSeries_Device_Node *DevNode =
xlateIoMmAddress(IoAddress, &DsaData, &BarOffset);
do { do {
++Pci_Io_Write_Count; ++Pci_Io_Write_Count;
Return.rc = HvCall4(HvCallPciBarStore16,DsaData.DsaAddr,BarOffset, swab16(Data), 0); Return.rc = HvCall4(HvCallPciBarStore16, DsaData.DsaAddr,
} while (CheckReturnCode("WWW",DevNode, Return.rc) != 0); BarOffset, swab16(Data), 0);
if(Pci_Trace_Flag == 1) PCIFR("WWW: IoAddress 0x%p = 0x%04X",IoAddress,Data); } while (CheckReturnCode("WWW", DevNode, Return.rc) != 0);
if (Pci_Trace_Flag == 1)
PCIFR("WWW: IoAddress 0x%p = 0x%04X", IoAddress, Data);
} }
void iSeries_Write_Long(u32 Data, void* IoAddress)
void iSeries_Write_Long(u32 Data, void *IoAddress)
{ {
u64 BarOffset; u64 BarOffset;
union HvDsaMap DsaData; union HvDsaMap DsaData;
struct HvCallPci_LoadReturn Return; struct HvCallPci_LoadReturn Return;
struct iSeries_Device_Node* DevNode = xlateIoMmAddress(IoAddress,&DsaData,&BarOffset); struct iSeries_Device_Node *DevNode =
xlateIoMmAddress(IoAddress, &DsaData, &BarOffset);
do { do {
++Pci_Io_Write_Count; ++Pci_Io_Write_Count;
Return.rc = HvCall4(HvCallPciBarStore32,DsaData.DsaAddr,BarOffset, swab32(Data), 0); Return.rc = HvCall4(HvCallPciBarStore32, DsaData.DsaAddr,
} while (CheckReturnCode("WWL",DevNode, Return.rc) != 0); BarOffset, swab32(Data), 0);
if(Pci_Trace_Flag == 1) PCIFR("WWL: IoAddress 0x%p = 0x%08X",IoAddress, Data); } while (CheckReturnCode("WWL", DevNode, Return.rc) != 0);
if (Pci_Trace_Flag == 1)
PCIFR("WWL: IoAddress 0x%p = 0x%08X", IoAddress, Data);
}
void pcibios_name_device(struct pci_dev *dev)
{
} }
#define PCIFR(...)
/************************************************************************/ /************************************************************************/
/* File iSeries_pci_reset.c created by Allan Trautman on Mar 21 2001. */ /* File iSeries_pci_reset.c created by Allan Trautman on Mar 21 2001. */
/************************************************************************/ /************************************************************************/
......
...@@ -27,9 +27,8 @@ ...@@ -27,9 +27,8 @@
#include <asm/iSeries/iSeries_proc.h> #include <asm/iSeries/iSeries_proc.h>
#endif #endif
static struct proc_dir_entry *iSeries_proc_root;
static struct proc_dir_entry * iSeries_proc_root = NULL; static int iSeries_proc_initializationDone;
static int iSeries_proc_initializationDone = 0;
static spinlock_t iSeries_proc_lock; static spinlock_t iSeries_proc_lock;
struct iSeries_proc_registration struct iSeries_proc_registration
...@@ -96,21 +95,22 @@ void iSeries_proc_create(void) ...@@ -96,21 +95,22 @@ void iSeries_proc_create(void)
{ {
unsigned long flags; unsigned long flags;
struct iSeries_proc_registration *reg = NULL; struct iSeries_proc_registration *reg = NULL;
spin_lock_irqsave(&iSeries_proc_lock, flags);
printk("iSeries_proc: Creating /proc/iSeries\n"); printk("iSeries_proc: Creating /proc/iSeries\n");
spin_lock_irqsave(&iSeries_proc_lock, flags);
iSeries_proc_root = proc_mkdir("iSeries", 0); iSeries_proc_root = proc_mkdir("iSeries", 0);
if (!iSeries_proc_root) return; if (!iSeries_proc_root)
goto out;
MYQUEUEDEQ(&iSeries_queued, reg); MYQUEUEDEQ(&iSeries_queued, reg);
while (reg != NULL) { while (reg != NULL) {
(*(reg->functionMember))(iSeries_proc_root); (*(reg->functionMember))(iSeries_proc_root);
MYQUEUEDEQ(&iSeries_queued, reg); MYQUEUEDEQ(&iSeries_queued, reg);
} }
iSeries_proc_initializationDone = 1; iSeries_proc_initializationDone = 1;
out:
spin_unlock_irqrestore(&iSeries_proc_lock, flags); spin_unlock_irqrestore(&iSeries_proc_lock, flags);
} }
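iSeries_proc_callback() (used elsewhere in this patch) can run before /proc/iSeries exists, so registrations are queued and then drained here once the directory has been created. A self-contained sketch of that deferred-registration pattern with a simple fixed-size queue; the names and the queue implementation are placeholders, not the MYQUEUE macros used above:

    #include <stdio.h>

    typedef void (*proc_cb)(const char *root);

    static proc_cb queued[8];
    static int     nqueued;
    static int     root_ready;

    /* Run the callback now if the root exists, otherwise park it. */
    static void proc_callback(proc_cb fn)
    {
        if (root_ready)
            fn("/proc/iSeries");
        else
            queued[nqueued++] = fn;
    }

    /* Create the root, then drain everything that arrived early. */
    static void proc_create(void)
    {
        root_ready = 1;
        for (int i = 0; i < nqueued; ++i)
            queued[i]("/proc/iSeries");
        nqueued = 0;
    }

    static void pmc_proc_init_sketch(const char *root)
    {
        printf("registering pmc entries under %s\n", root);
    }

    int main(void)
    {
        proc_callback(pmc_proc_init_sketch);   /* too early: gets queued */
        proc_create();                         /* drains the queue */
        return 0;
    }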
......
...@@ -25,6 +25,8 @@ ...@@ -25,6 +25,8 @@
#include <linux/bootmem.h> #include <linux/bootmem.h>
#include <linux/initrd.h> #include <linux/initrd.h>
#include <linux/seq_file.h> #include <linux/seq_file.h>
#include <linux/kdev_t.h>
#include <linux/major.h>
#include <linux/root_dev.h> #include <linux/root_dev.h>
#include <asm/processor.h> #include <asm/processor.h>
...@@ -53,34 +55,34 @@ ...@@ -53,34 +55,34 @@
#include <asm/iSeries/mf.h> #include <asm/iSeries/mf.h>
/* Function Prototypes */ /* Function Prototypes */
extern void abort(void); extern void abort(void);
#ifdef CONFIG_PPC_ISERIES
static void build_iSeries_Memory_Map( void );
static void setup_iSeries_cache_sizes( void );
static void iSeries_bolt_kernel(unsigned long saddr, unsigned long eaddr);
#endif
void build_valid_hpte( unsigned long vsid, unsigned long ea, unsigned long pa,
pte_t * ptep, unsigned hpteflags, unsigned bolted );
extern void ppcdbg_initialize(void); extern void ppcdbg_initialize(void);
extern void iSeries_pcibios_init(void); extern void iSeries_pcibios_init(void);
static void build_iSeries_Memory_Map(void);
static void setup_iSeries_cache_sizes(void);
static void iSeries_bolt_kernel(unsigned long saddr, unsigned long eaddr);
void build_valid_hpte(unsigned long vsid, unsigned long ea, unsigned long pa,
pte_t *ptep, unsigned hpteflags, unsigned bolted);
static void iSeries_setup_dprofile(void); static void iSeries_setup_dprofile(void);
void iSeries_setup_arch(void);
/* Global Variables */ /* Global Variables */
static unsigned long procFreqHz;
static unsigned long procFreqMhz;
static unsigned long procFreqMhzHundreths;
static unsigned long procFreqHz = 0; static unsigned long tbFreqHz;
static unsigned long procFreqMhz = 0; static unsigned long tbFreqMhz;
static unsigned long procFreqMhzHundreths = 0; static unsigned long tbFreqMhzHundreths;
static unsigned long tbFreqHz = 0; unsigned long dprof_shift;
static unsigned long tbFreqMhz = 0; unsigned long dprof_len;
static unsigned long tbFreqMhzHundreths = 0; unsigned int *dprof_buffer;
unsigned long dprof_shift = 0; int piranha_simulator;
unsigned long dprof_len = 0;
unsigned int * dprof_buffer = NULL;
int piranha_simulator = 0; int boot_cpuid;
extern char _end[]; extern char _end[];
...@@ -92,7 +94,7 @@ extern unsigned long embedded_sysmap_end; ...@@ -92,7 +94,7 @@ extern unsigned long embedded_sysmap_end;
extern unsigned long iSeries_recal_tb; extern unsigned long iSeries_recal_tb;
extern unsigned long iSeries_recal_titan; extern unsigned long iSeries_recal_titan;
static int mf_initialized = 0; static int mf_initialized;
struct MemoryBlock { struct MemoryBlock {
unsigned long absStart; unsigned long absStart;
...@@ -106,30 +108,30 @@ struct MemoryBlock { ...@@ -106,30 +108,30 @@ struct MemoryBlock {
* and return the number of physical blocks and fill in the array of * and return the number of physical blocks and fill in the array of
* block data. * block data.
*/ */
unsigned long iSeries_process_Condor_mainstore_vpd(struct MemoryBlock *mb_array,
unsigned long iSeries_process_Condor_mainstore_vpd( struct MemoryBlock *mb_array, unsigned long max_entries ) unsigned long max_entries)
{ {
/* Determine if absolute memory has any
* holes so that we can interpret the
* access map we get back from the hypervisor
* correctly.
*/
unsigned long holeFirstChunk, holeSizeChunks; unsigned long holeFirstChunk, holeSizeChunks;
unsigned long numMemoryBlocks = 1; unsigned long numMemoryBlocks = 1;
struct IoHriMainStoreSegment4 * msVpd = (struct IoHriMainStoreSegment4 *)xMsVpd; struct IoHriMainStoreSegment4 *msVpd =
(struct IoHriMainStoreSegment4 *)xMsVpd;
unsigned long holeStart = msVpd->nonInterleavedBlocksStartAdr; unsigned long holeStart = msVpd->nonInterleavedBlocksStartAdr;
unsigned long holeEnd = msVpd->nonInterleavedBlocksEndAdr; unsigned long holeEnd = msVpd->nonInterleavedBlocksEndAdr;
unsigned long holeSize = holeEnd - holeStart; unsigned long holeSize = holeEnd - holeStart;
printk("Mainstore_VPD: Condor\n"); printk("Mainstore_VPD: Condor\n");
/*
* Determine if absolute memory has any
* holes so that we can interpret the
* access map we get back from the hypervisor
* correctly.
*/
mb_array[0].logicalStart = 0; mb_array[0].logicalStart = 0;
mb_array[0].logicalEnd = 0x100000000; mb_array[0].logicalEnd = 0x100000000;
mb_array[0].absStart = 0; mb_array[0].absStart = 0;
mb_array[0].absEnd = 0x100000000; mb_array[0].absEnd = 0x100000000;
if ( holeSize ) { if (holeSize) {
numMemoryBlocks = 2; numMemoryBlocks = 2;
holeStart = holeStart & 0x000fffffffffffff; holeStart = holeStart & 0x000fffffffffffff;
holeStart = addr_to_chunk(holeStart); holeStart = addr_to_chunk(holeStart);
...@@ -138,275 +140,264 @@ unsigned long iSeries_process_Condor_mainstore_vpd( struct MemoryBlock *mb_array ...@@ -138,275 +140,264 @@ unsigned long iSeries_process_Condor_mainstore_vpd( struct MemoryBlock *mb_array
holeSizeChunks = holeSize; holeSizeChunks = holeSize;
printk( "Main store hole: start chunk = %0lx, size = %0lx chunks\n", printk( "Main store hole: start chunk = %0lx, size = %0lx chunks\n",
holeFirstChunk, holeSizeChunks ); holeFirstChunk, holeSizeChunks );
mb_array[0].logicalEnd = holeFirstChunk; mb_array[0].logicalEnd = holeFirstChunk;
mb_array[0].absEnd = holeFirstChunk; mb_array[0].absEnd = holeFirstChunk;
mb_array[1].logicalStart = holeFirstChunk; mb_array[1].logicalStart = holeFirstChunk;
mb_array[1].logicalEnd = 0x100000000 - holeSizeChunks; mb_array[1].logicalEnd = 0x100000000 - holeSizeChunks;
mb_array[1].absStart = holeFirstChunk + holeSizeChunks; mb_array[1].absStart = holeFirstChunk + holeSizeChunks;
mb_array[1].absEnd = 0x100000000; mb_array[1].absEnd = 0x100000000;
} }
return numMemoryBlocks; return numMemoryBlocks;
} }
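For Condor machines the main store is described as a single block unless the VPD reports a non-interleaved hole, in which case the range is split into two blocks and the logical span is shrunk by the hole size. A standalone sketch of that split, working in the same 256K-chunk units (the sizes below are arbitrary examples):

    #include <stdio.h>

    struct block { unsigned long log_start, log_end, abs_start, abs_end; };

    /* Split a flat [0, total) range around a hole, the way the Condor
     * main-store VPD path builds its one- or two-entry block array. */
    static int split_around_hole(struct block *mb, unsigned long total,
                                 unsigned long hole_start, unsigned long hole_size)
    {
        mb[0] = (struct block){ 0, total, 0, total };
        if (!hole_size)
            return 1;
        mb[0].log_end = mb[0].abs_end = hole_start;
        mb[1].log_start = hole_start;
        mb[1].log_end   = total - hole_size;
        mb[1].abs_start = hole_start + hole_size;
        mb[1].abs_end   = total;
        return 2;
    }

    int main(void)
    {
        struct block mb[2];
        int n = split_around_hole(mb, 0x4000, 0x1000, 0x200);

        for (int i = 0; i < n; ++i)
            printf("block %d: logical %#lx-%#lx abs %#lx-%#lx\n", i,
                   mb[i].log_start, mb[i].log_end,
                   mb[i].abs_start, mb[i].abs_end);
        return 0;
    }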
#define MaxSegmentAreas 32 #define MaxSegmentAreas 32
#define MaxSegmentAdrRangeBlocks 128 #define MaxSegmentAdrRangeBlocks 128
#define MaxAreaRangeBlocks 4 #define MaxAreaRangeBlocks 4
unsigned long iSeries_process_Regatta_mainstore_vpd( struct MemoryBlock *mb_array, unsigned long max_entries )
unsigned long iSeries_process_Regatta_mainstore_vpd(
struct MemoryBlock *mb_array, unsigned long max_entries)
{ {
struct IoHriMainStoreSegment5 * msVpdP = (struct IoHriMainStoreSegment5 *)xMsVpd; struct IoHriMainStoreSegment5 *msVpdP =
(struct IoHriMainStoreSegment5 *)xMsVpd;
unsigned long numSegmentBlocks = 0; unsigned long numSegmentBlocks = 0;
u32 existsBits = msVpdP->msAreaExists; u32 existsBits = msVpdP->msAreaExists;
unsigned long area_num; unsigned long area_num;
printk("Mainstore_VPD: Regatta\n"); printk("Mainstore_VPD: Regatta\n");
for ( area_num = 0; area_num < MaxSegmentAreas; ++area_num ) { for (area_num = 0; area_num < MaxSegmentAreas; ++area_num ) {
unsigned long numAreaBlocks; unsigned long numAreaBlocks;
struct IoHriMainStoreArea4 * currentArea; struct IoHriMainStoreArea4 *currentArea;
if ( existsBits & 0x80000000 ) { if (existsBits & 0x80000000) {
unsigned long block_num; unsigned long block_num;
currentArea = &msVpdP->msAreaArray[area_num]; currentArea = &msVpdP->msAreaArray[area_num];
numAreaBlocks = currentArea->numAdrRangeBlocks; numAreaBlocks = currentArea->numAdrRangeBlocks;
printk("ms_vpd: processing area %2ld blocks=%ld",
printk("ms_vpd: processing area %2ld blocks=%ld", area_num, numAreaBlocks); area_num, numAreaBlocks);
for (block_num = 0; block_num < numAreaBlocks;
for ( block_num = 0; block_num < numAreaBlocks; ++block_num ) { ++block_num ) {
/* Process an address range block */ /* Process an address range block */
struct MemoryBlock tempBlock; struct MemoryBlock tempBlock;
unsigned long i; unsigned long i;
tempBlock.absStart = (unsigned long)currentArea->xAdrRangeBlock[block_num].blockStart; tempBlock.absStart =
tempBlock.absEnd = (unsigned long)currentArea->xAdrRangeBlock[block_num].blockEnd; (unsigned long)currentArea->xAdrRangeBlock[block_num].blockStart;
tempBlock.absEnd =
(unsigned long)currentArea->xAdrRangeBlock[block_num].blockEnd;
tempBlock.logicalStart = 0; tempBlock.logicalStart = 0;
tempBlock.logicalEnd = 0; tempBlock.logicalEnd = 0;
printk("\n block %ld absStart=%016lx absEnd=%016lx",
block_num, tempBlock.absStart,
tempBlock.absEnd);
printk("\n block %ld absStart=%016lx absEnd=%016lx", block_num, for (i = 0; i < numSegmentBlocks; ++i) {
tempBlock.absStart, tempBlock.absEnd); if (mb_array[i].absStart ==
tempBlock.absStart)
for ( i=0; i<numSegmentBlocks; ++i ) {
if ( mb_array[i].absStart == tempBlock.absStart )
break; break;
} }
if ( i == numSegmentBlocks ) { if (i == numSegmentBlocks) {
if ( numSegmentBlocks == max_entries ) { if (numSegmentBlocks == max_entries)
panic("iSeries_process_mainstore_vpd: too many memory blocks"); panic("iSeries_process_mainstore_vpd: too many memory blocks");
}
mb_array[numSegmentBlocks] = tempBlock; mb_array[numSegmentBlocks] = tempBlock;
++numSegmentBlocks; ++numSegmentBlocks;
} } else
else {
printk(" (duplicate)"); printk(" (duplicate)");
}
} }
printk("\n"); printk("\n");
} }
existsBits <<= 1; existsBits <<= 1;
} }
/* Now sort the blocks found into ascending sequence */ /* Now sort the blocks found into ascending sequence */
if ( numSegmentBlocks > 1 ) { if (numSegmentBlocks > 1) {
unsigned long m, n; unsigned long m, n;
for ( m=0; m<numSegmentBlocks-1; ++m ) {
for ( n=numSegmentBlocks-1; m<n; --n ) { for (m = 0; m < numSegmentBlocks - 1; ++m) {
if ( mb_array[n].absStart < mb_array[n-1].absStart ) { for (n = numSegmentBlocks - 1; m < n; --n) {
if (mb_array[n].absStart <
mb_array[n-1].absStart) {
struct MemoryBlock tempBlock; struct MemoryBlock tempBlock;
tempBlock = mb_array[n]; tempBlock = mb_array[n];
mb_array[n] = mb_array[n-1]; mb_array[n] = mb_array[n-1];
mb_array[n-1] = tempBlock; mb_array[n-1] = tempBlock;
} }
} }
} }
} }
/* Assign "logical" addresses to each block. These /*
* Assign "logical" addresses to each block. These
* addresses correspond to the hypervisor "bitmap" space. * addresses correspond to the hypervisor "bitmap" space.
* Convert all addresses into units of 256K chunks. * Convert all addresses into units of 256K chunks.
*/ */
{ {
unsigned long i, nextBitmapAddress; unsigned long i, nextBitmapAddress;
printk("ms_vpd: %ld sorted memory blocks\n", numSegmentBlocks); printk("ms_vpd: %ld sorted memory blocks\n", numSegmentBlocks);
nextBitmapAddress = 0; nextBitmapAddress = 0;
for ( i=0; i<numSegmentBlocks; ++i ) { for (i = 0; i < numSegmentBlocks; ++i) {
unsigned long length = mb_array[i].absEnd - mb_array[i].absStart; unsigned long length = mb_array[i].absEnd -
mb_array[i].absStart;
mb_array[i].logicalStart = nextBitmapAddress; mb_array[i].logicalStart = nextBitmapAddress;
mb_array[i].logicalEnd = nextBitmapAddress + length; mb_array[i].logicalEnd = nextBitmapAddress + length;
nextBitmapAddress += length; nextBitmapAddress += length;
printk(" Bitmap range: %016lx - %016lx\n" printk(" Bitmap range: %016lx - %016lx\n"
" Absolute range: %016lx - %016lx\n", " Absolute range: %016lx - %016lx\n",
mb_array[i].logicalStart, mb_array[i].logicalEnd, mb_array[i].logicalStart,
mb_array[i].logicalEnd,
mb_array[i].absStart, mb_array[i].absEnd); mb_array[i].absStart, mb_array[i].absEnd);
mb_array[i].absStart = addr_to_chunk( mb_array[i].absStart & 0x000fffffffffffff ); mb_array[i].absStart = addr_to_chunk(mb_array[i].absStart &
mb_array[i].absEnd = addr_to_chunk( mb_array[i].absEnd & 0x000fffffffffffff ); 0x000fffffffffffff);
mb_array[i].logicalStart = addr_to_chunk( mb_array[i].logicalStart ); mb_array[i].absEnd = addr_to_chunk(mb_array[i].absEnd &
mb_array[i].logicalEnd = addr_to_chunk( mb_array[i].logicalEnd ); 0x000fffffffffffff);
mb_array[i].logicalStart =
addr_to_chunk(mb_array[i].logicalStart);
mb_array[i].logicalEnd = addr_to_chunk(mb_array[i].logicalEnd);
} }
} }
return numSegmentBlocks; return numSegmentBlocks;
} }
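For Regatta machines, the blocks collected from the segment areas are then sorted by absolute start address with a simple exchange sort, and their logical (hypervisor bitmap) addresses are laid out back to back. A standalone sketch of those two passes over a simplified block type:

    #include <stdio.h>

    struct block { unsigned long abs_start, abs_end, log_start, log_end; };

    /* Sort by absolute start (same exchange sort as the kernel loop),
     * then pack the logical addresses contiguously. */
    static void sort_and_assign(struct block *mb, int n)
    {
        for (int m = 0; m < n - 1; ++m)
            for (int k = n - 1; m < k; --k)
                if (mb[k].abs_start < mb[k - 1].abs_start) {
                    struct block tmp = mb[k];
                    mb[k] = mb[k - 1];
                    mb[k - 1] = tmp;
                }

        unsigned long next = 0;
        for (int i = 0; i < n; ++i) {
            unsigned long len = mb[i].abs_end - mb[i].abs_start;
            mb[i].log_start = next;
            mb[i].log_end   = next + len;
            next += len;
        }
    }

    int main(void)
    {
        struct block mb[] = {
            { 0x2000, 0x3000, 0, 0 },
            { 0x0000, 0x1000, 0, 0 },
        };
        sort_and_assign(mb, 2);
        for (int i = 0; i < 2; ++i)
            printf("abs %#lx-%#lx -> logical %#lx-%#lx\n",
                   mb[i].abs_start, mb[i].abs_end,
                   mb[i].log_start, mb[i].log_end);
        return 0;
    }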
unsigned long iSeries_process_mainstore_vpd( struct MemoryBlock *mb_array, unsigned long max_entries ) unsigned long iSeries_process_mainstore_vpd(struct MemoryBlock *mb_array,
unsigned long max_entries)
{ {
unsigned long i; unsigned long i;
unsigned long mem_blocks = 0; unsigned long mem_blocks = 0;
if (cur_cpu_spec->cpu_features & CPU_FTR_SLB) if (cur_cpu_spec->cpu_features & CPU_FTR_SLB)
mem_blocks = iSeries_process_Regatta_mainstore_vpd( mb_array, max_entries ); mem_blocks = iSeries_process_Regatta_mainstore_vpd(mb_array,
max_entries);
else else
mem_blocks = iSeries_process_Condor_mainstore_vpd( mb_array, max_entries ); mem_blocks = iSeries_process_Condor_mainstore_vpd(mb_array,
max_entries);
printk("Mainstore_VPD: numMemoryBlocks = %ld \n", mem_blocks); printk("Mainstore_VPD: numMemoryBlocks = %ld \n", mem_blocks);
for ( i=0; i<mem_blocks; ++i ) { for (i = 0; i < mem_blocks; ++i) {
printk("Mainstore_VPD: block %3ld logical chunks %016lx - %016lx\n" printk("Mainstore_VPD: block %3ld logical chunks %016lx - %016lx\n"
" abs chunks %016lx - %016lx\n", " abs chunks %016lx - %016lx\n",
i, mb_array[i].logicalStart, mb_array[i].logicalEnd, i, mb_array[i].logicalStart, mb_array[i].logicalEnd,
mb_array[i].absStart, mb_array[i].absEnd); mb_array[i].absStart, mb_array[i].absEnd);
} }
return mem_blocks; return mem_blocks;
} }
/* void __init iSeries_init_early(void)
* void __init iSeries_init_early()
*/
void __init
iSeries_init_early(void)
{ {
#ifdef CONFIG_PPC_ISERIES
ppcdbg_initialize(); ppcdbg_initialize();
#if defined(CONFIG_BLK_DEV_INITRD) #if defined(CONFIG_BLK_DEV_INITRD)
/* /*
* If the init RAM disk has been configured and there is * If the init RAM disk has been configured and there is
* a non-zero starting address for it, set it up * a non-zero starting address for it, set it up
*/ */
if (naca->xRamDisk) {
if ( naca->xRamDisk ) {
initrd_start = (unsigned long)__va(naca->xRamDisk); initrd_start = (unsigned long)__va(naca->xRamDisk);
initrd_end = initrd_start + naca->xRamDiskSize * PAGE_SIZE; initrd_end = initrd_start + naca->xRamDiskSize * PAGE_SIZE;
initrd_below_start_ok = 1; // ramdisk in kernel space initrd_below_start_ok = 1; // ramdisk in kernel space
ROOT_DEV = Root_RAM0; ROOT_DEV = Root_RAM0;
if (((rd_size * 1024) / PAGE_SIZE) < naca->xRamDiskSize)
if ( ((rd_size*1024)/PAGE_SIZE) < naca->xRamDiskSize ) rd_size = (naca->xRamDiskSize * PAGE_SIZE) / 1024;
rd_size = (naca->xRamDiskSize*PAGE_SIZE)/1024;
} else } else
#endif /* CONFIG_BLK_DEV_INITRD */ #endif /* CONFIG_BLK_DEV_INITRD */
{ {
/* ROOT_DEV = MKDEV(VIODASD_MAJOR, 1); */
/* ROOT_DEV = MKDEV( VIODASD_MAJOR, 1 ); */ }
}
iSeries_recal_tb = get_tb(); iSeries_recal_tb = get_tb();
iSeries_recal_titan = HvCallXm_loadTod(); iSeries_recal_titan = HvCallXm_loadTod();
ppc_md.setup_arch = iSeries_setup_arch; ppc_md.setup_arch = iSeries_setup_arch;
ppc_md.setup_residual = iSeries_setup_residual; ppc_md.setup_residual = iSeries_setup_residual;
ppc_md.get_cpuinfo = iSeries_get_cpuinfo; ppc_md.get_cpuinfo = iSeries_get_cpuinfo;
ppc_md.init_IRQ = iSeries_init_IRQ; ppc_md.init_IRQ = iSeries_init_IRQ;
ppc_md.get_irq = iSeries_get_irq; ppc_md.init_irq_desc = iSeries_init_irq_desc;
ppc_md.init = NULL; ppc_md.get_irq = iSeries_get_irq;
ppc_md.init = NULL;
ppc_md.restart = iSeries_restart; ppc_md.restart = iSeries_restart;
ppc_md.power_off = iSeries_power_off; ppc_md.power_off = iSeries_power_off;
ppc_md.halt = iSeries_halt; ppc_md.halt = iSeries_halt;
ppc_md.get_boot_time = iSeries_get_boot_time; ppc_md.get_boot_time = iSeries_get_boot_time;
ppc_md.set_rtc_time = iSeries_set_rtc_time; ppc_md.set_rtc_time = iSeries_set_rtc_time;
ppc_md.get_rtc_time = iSeries_get_rtc_time; ppc_md.get_rtc_time = iSeries_get_rtc_time;
ppc_md.calibrate_decr = iSeries_calibrate_decr; ppc_md.calibrate_decr = iSeries_calibrate_decr;
ppc_md.progress = iSeries_progress; ppc_md.progress = iSeries_progress;
hpte_init_iSeries(); hpte_init_iSeries();
tce_init_iSeries(); tce_init_iSeries();
/* Initialize the table which translates Linux physical addresses to /*
* Initialize the table which translates Linux physical addresses to
* AS/400 absolute addresses * AS/400 absolute addresses
*/ */
build_iSeries_Memory_Map(); build_iSeries_Memory_Map();
setup_iSeries_cache_sizes(); setup_iSeries_cache_sizes();
/* Initialize machine-dependency vectors */ /* Initialize machine-dependency vectors */
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
smp_init_iSeries(); smp_init_iSeries();
#endif #endif
if (itLpNaca.xPirEnvironMode == 0)
if ( itLpNaca.xPirEnvironMode == 0 )
piranha_simulator = 1; piranha_simulator = 1;
#endif
} }
/* void __init iSeries_init(unsigned long r3, unsigned long r4, unsigned long r5,
* void __init iSeries_init()
*/
void __init
iSeries_init(unsigned long r3, unsigned long r4, unsigned long r5,
unsigned long r6, unsigned long r7) unsigned long r6, unsigned long r7)
{ {
/* Associate Lp Event Queue 0 with processor 0 */ char *p, *q;
HvCallEvent_setLpEventQueueInterruptProc( 0, 0 );
{ /* Associate Lp Event Queue 0 with processor 0 */
/* copy the command line parameter from the primary VSP */ HvCallEvent_setLpEventQueueInterruptProc(0, 0);
char *p, *q;
HvCallEvent_dmaToSp( cmd_line, /* copy the command line parameter from the primary VSP */
2*64*1024, HvCallEvent_dmaToSp(cmd_line, 2 * 64* 1024, 256,
256, HvLpDma_Direction_RemoteToLocal);
HvLpDma_Direction_RemoteToLocal );
p = q = cmd_line + 255;
p = q = cmd_line + 255; while (p > cmd_line) {
while( p > cmd_line ) { if ((*p == 0) || (*p == ' ') || (*p == '\n'))
if ((*p == 0) || (*p == ' ') || (*p == '\n')) --p;
--p; else
else break;
break;
}
if ( p < q )
*(p+1) = 0;
} }
if (p < q)
*(p + 1) = 0;
if (strstr(cmd_line, "dprofile=")) { if (strstr(cmd_line, "dprofile=")) {
char *p, *q;
for (q = cmd_line; (p = strstr(q, "dprofile=")) != 0; ) { for (q = cmd_line; (p = strstr(q, "dprofile=")) != 0; ) {
unsigned long size, new_klimit; unsigned long size, new_klimit;
q = p + 9; q = p + 9;
if (p > cmd_line && p[-1] != ' ') if ((p > cmd_line) && (p[-1] != ' '))
continue; continue;
dprof_shift = simple_strtoul(q, &q, 0); dprof_shift = simple_strtoul(q, &q, 0);
dprof_len = (unsigned long)_etext - (unsigned long)_stext; dprof_len = (unsigned long)_etext -
(unsigned long)_stext;
dprof_len >>= dprof_shift; dprof_len >>= dprof_shift;
size = ((dprof_len * sizeof(unsigned int)) + (PAGE_SIZE-1)) & PAGE_MASK; size = ((dprof_len * sizeof(unsigned int)) +
dprof_buffer = (unsigned int *)((klimit + (PAGE_SIZE-1)) & PAGE_MASK); (PAGE_SIZE-1)) & PAGE_MASK;
dprof_buffer = (unsigned int *)((klimit +
(PAGE_SIZE-1)) & PAGE_MASK);
new_klimit = ((unsigned long)dprof_buffer) + size; new_klimit = ((unsigned long)dprof_buffer) + size;
lmb_reserve( __pa(klimit), (new_klimit-klimit)); lmb_reserve(__pa(klimit), (new_klimit-klimit));
klimit = new_klimit; klimit = new_klimit;
memset( dprof_buffer, 0, size ); memset(dprof_buffer, 0, size);
} }
} }
iSeries_setup_dprofile(); iSeries_setup_dprofile();
iSeries_proc_early_init(); iSeries_proc_early_init();
mf_init(); mf_init();
mf_initialized = 1; mf_initialized = 1;
mb(); mb();
iSeries_proc_callback( &pmc_proc_init ); iSeries_proc_callback(&pmc_proc_init);
} }
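The command line arrives as a fixed 256-byte buffer DMAed from the primary VSP, so the code walks back from the end and cuts off trailing NULs, blanks and newlines. The same trim as a standalone helper:

    #include <stdio.h>

    /* Trim trailing NULs, blanks and newlines from a fixed-size buffer,
     * as iSeries_init() does for the command line from the primary VSP. */
    static void trim_cmdline(char *buf, int len)
    {
        char *p = buf + len - 1, *q = p;

        while (p > buf && (*p == 0 || *p == ' ' || *p == '\n'))
            --p;
        if (p < q)
            p[1] = 0;
    }

    int main(void)
    {
        char cmd[16] = "root=/dev/sda \n";
        trim_cmdline(cmd, sizeof(cmd));
        printf("[%s]\n", cmd);
        return 0;
    }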
#ifdef CONFIG_PPC_ISERIES
/* /*
* The iSeries may have very large memories ( > 128 GB ) and a partition * The iSeries may have very large memories ( > 128 GB ) and a partition
* may get memory in "chunks" that may be anywhere in the 2**52 real * may get memory in "chunks" that may be anywhere in the 2**52 real
...@@ -444,9 +435,10 @@ static void __init build_iSeries_Memory_Map(void) ...@@ -444,9 +435,10 @@ static void __init build_iSeries_Memory_Map(void)
/* Chunk size on iSeries is 256K bytes */ /* Chunk size on iSeries is 256K bytes */
totalChunks = (u32)HvLpConfig_getMsChunks(); totalChunks = (u32)HvLpConfig_getMsChunks();
klimit = msChunks_alloc(klimit, totalChunks, 1UL<<18); klimit = msChunks_alloc(klimit, totalChunks, 1UL << 18);
/* Get absolute address of our load area /*
* Get absolute address of our load area
* and map it to physical address 0 * and map it to physical address 0
* This guarantees that the loadarea ends up at physical 0 * This guarantees that the loadarea ends up at physical 0
* otherwise, it might not be returned by PLIC as the first * otherwise, it might not be returned by PLIC as the first
...@@ -456,63 +448,68 @@ static void __init build_iSeries_Memory_Map(void) ...@@ -456,63 +448,68 @@ static void __init build_iSeries_Memory_Map(void)
loadAreaFirstChunk = (u32)addr_to_chunk(itLpNaca.xLoadAreaAddr); loadAreaFirstChunk = (u32)addr_to_chunk(itLpNaca.xLoadAreaAddr);
loadAreaSize = itLpNaca.xLoadAreaChunks; loadAreaSize = itLpNaca.xLoadAreaChunks;
/* Only add the pages already mapped here. /*
* Only add the pages already mapped here.
* Otherwise we might add the hpt pages * Otherwise we might add the hpt pages
* The rest of the pages of the load area * The rest of the pages of the load area
* aren't in the HPT yet and can still * aren't in the HPT yet and can still
* be assigned an arbitrary physical address * be assigned an arbitrary physical address
*/ */
if ( (loadAreaSize * 64) > HvPagesToMap ) if ((loadAreaSize * 64) > HvPagesToMap)
loadAreaSize = HvPagesToMap / 64; loadAreaSize = HvPagesToMap / 64;
loadAreaLastChunk = loadAreaFirstChunk + loadAreaSize - 1; loadAreaLastChunk = loadAreaFirstChunk + loadAreaSize - 1;
/* TODO Do we need to do something if the HPT is in the 64MB load area? /*
* TODO Do we need to do something if the HPT is in the 64MB load area?
* This would be required if the itLpNaca.xLoadAreaChunks includes * This would be required if the itLpNaca.xLoadAreaChunks includes
* the HPT size * the HPT size
*/ */
printk( "Mapping load area - physical addr = 0000000000000000\n" printk("Mapping load area - physical addr = 0000000000000000\n"
" absolute addr = %016lx\n", " absolute addr = %016lx\n",
chunk_to_addr(loadAreaFirstChunk) ); chunk_to_addr(loadAreaFirstChunk));
printk( "Load area size %dK\n", loadAreaSize*256 ); printk("Load area size %dK\n", loadAreaSize * 256);
for ( nextPhysChunk = 0; for (nextPhysChunk = 0; nextPhysChunk < loadAreaSize; ++nextPhysChunk)
nextPhysChunk < loadAreaSize; msChunks.abs[nextPhysChunk] =
++nextPhysChunk ) { loadAreaFirstChunk + nextPhysChunk;
msChunks.abs[nextPhysChunk] = loadAreaFirstChunk+nextPhysChunk;
}
/* Get absolute address of our HPT and remember it so /*
* Get absolute address of our HPT and remember it so
* we won't map it to any physical address * we won't map it to any physical address
*/ */
hptFirstChunk = (u32)addr_to_chunk(HvCallHpt_getHptAddress()); hptFirstChunk = (u32)addr_to_chunk(HvCallHpt_getHptAddress());
hptSizePages = (u32)(HvCallHpt_getHptPages()); hptSizePages = (u32)HvCallHpt_getHptPages();
hptSizeChunks = hptSizePages >> (msChunks.chunk_shift-PAGE_SHIFT); hptSizeChunks = hptSizePages >> (msChunks.chunk_shift - PAGE_SHIFT);
hptLastChunk = hptFirstChunk + hptSizeChunks - 1; hptLastChunk = hptFirstChunk + hptSizeChunks - 1;
printk( "HPT absolute addr = %016lx, size = %dK\n", printk("HPT absolute addr = %016lx, size = %dK\n",
chunk_to_addr(hptFirstChunk), hptSizeChunks*256 ); chunk_to_addr(hptFirstChunk), hptSizeChunks * 256);
/* Fill in the htab_data structure */ /* Fill in the htab_data structure */
/* Fill in size of hashed page table */ /* Fill in size of hashed page table */
num_ptegs = hptSizePages * (PAGE_SIZE/(sizeof(HPTE)*HPTES_PER_GROUP)); num_ptegs = hptSizePages *
(PAGE_SIZE / (sizeof(HPTE) * HPTES_PER_GROUP));
htab_data.htab_num_ptegs = num_ptegs; htab_data.htab_num_ptegs = num_ptegs;
htab_data.htab_hash_mask = num_ptegs - 1; htab_data.htab_hash_mask = num_ptegs - 1;
/* The actual hashed page table is in the hypervisor, we have no direct access */ /*
* The actual hashed page table is in the hypervisor,
* we have no direct access
*/
htab_data.htab = NULL; htab_data.htab = NULL;
/* Determine if absolute memory has any /*
* Determine if absolute memory has any
* holes so that we can interpret the * holes so that we can interpret the
* access map we get back from the hypervisor * access map we get back from the hypervisor
* correctly. * correctly.
*/ */
numMemoryBlocks = iSeries_process_mainstore_vpd( mb, 32 ); numMemoryBlocks = iSeries_process_mainstore_vpd(mb, 32);
/* Process the main store access map from the hypervisor /*
* Process the main store access map from the hypervisor
* to build up our physical -> absolute translation table * to build up our physical -> absolute translation table
*/ */
curBlock = 0; curBlock = 0;
...@@ -520,30 +517,29 @@ static void __init build_iSeries_Memory_Map(void) ...@@ -520,30 +517,29 @@ static void __init build_iSeries_Memory_Map(void)
currDword = 0; currDword = 0;
moreChunks = totalChunks; moreChunks = totalChunks;
while ( moreChunks ) { while (moreChunks) {
map = HvCallSm_get64BitsOfAccessMap( itLpNaca.xLpIndex, map = HvCallSm_get64BitsOfAccessMap(itLpNaca.xLpIndex,
currDword ); currDword);
thisChunk = currChunk; thisChunk = currChunk;
while ( map ) { while (map) {
chunkBit = map >> 63; chunkBit = map >> 63;
map <<= 1; map <<= 1;
if ( chunkBit ) { if (chunkBit) {
--moreChunks; --moreChunks;
while (thisChunk >= mb[curBlock].logicalEnd) {
while ( thisChunk >= mb[curBlock].logicalEnd ) {
++curBlock; ++curBlock;
if ( curBlock >= numMemoryBlocks ) if (curBlock >= numMemoryBlocks)
panic("out of memory blocks"); panic("out of memory blocks");
} }
if ( thisChunk < mb[curBlock].logicalStart ) if (thisChunk < mb[curBlock].logicalStart)
panic("memory block error"); panic("memory block error");
absChunk = mb[curBlock].absStart + ( thisChunk - mb[curBlock].logicalStart ); absChunk = mb[curBlock].absStart +
(thisChunk - mb[curBlock].logicalStart);
if ( ( ( absChunk < hptFirstChunk ) || if (((absChunk < hptFirstChunk) ||
( absChunk > hptLastChunk ) ) && (absChunk > hptLastChunk)) &&
( ( absChunk < loadAreaFirstChunk ) || ((absChunk < loadAreaFirstChunk) ||
( absChunk > loadAreaLastChunk ) ) ) { (absChunk > loadAreaLastChunk))) {
msChunks.abs[nextPhysChunk] = absChunk; msChunks.abs[nextPhysChunk] = absChunk;
++nextPhysChunk; ++nextPhysChunk;
} }
...@@ -553,8 +549,9 @@ static void __init build_iSeries_Memory_Map(void) ...@@ -553,8 +549,9 @@ static void __init build_iSeries_Memory_Map(void)
++currDword; ++currDword;
currChunk += 64; currChunk += 64;
} }
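The loop above walks each 64-bit doubleword of the hypervisor access map most-significant bit first; every set bit marks one 256K chunk owned by this partition, which is then translated to an absolute chunk and recorded. A minimal standalone sketch of that bit walk, with an arbitrary sample bitmap:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t map = 0xF0F0000000000000ULL;   /* assumed sample bitmap */
        unsigned thisChunk = 0;

        while (map) {
                if (map >> 63)                  /* chunk owned by this LPAR */
                        printf("chunk %u is ours\n", thisChunk);
                map <<= 1;
                ++thisChunk;
        }
        return 0;
}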
/* main store size (in chunks) is /*
* main store size (in chunks) is
* totalChunks - hptSizeChunks * totalChunks - hptSizeChunks
* which should be equal to * which should be equal to
* nextPhysChunk * nextPhysChunk
...@@ -562,12 +559,12 @@ static void __init build_iSeries_Memory_Map(void) ...@@ -562,12 +559,12 @@ static void __init build_iSeries_Memory_Map(void)
systemcfg->physicalMemorySize = chunk_to_addr(nextPhysChunk); systemcfg->physicalMemorySize = chunk_to_addr(nextPhysChunk);
/* Bolt kernel mappings for all of memory */ /* Bolt kernel mappings for all of memory */
iSeries_bolt_kernel( 0, systemcfg->physicalMemorySize ); iSeries_bolt_kernel(0, systemcfg->physicalMemorySize);
lmb_init(); lmb_init();
lmb_add( 0, systemcfg->physicalMemorySize ); lmb_add(0, systemcfg->physicalMemorySize);
lmb_analyze(); /* ?? */ lmb_analyze(); /* ?? */
lmb_reserve( 0, __pa(klimit)); lmb_reserve(0, __pa(klimit));
/* /*
* Hardcode to GP size. I am not sure where to get this info. DRENG * Hardcode to GP size. I am not sure where to get this info. DRENG
...@@ -579,59 +576,94 @@ static void __init build_iSeries_Memory_Map(void) ...@@ -579,59 +576,94 @@ static void __init build_iSeries_Memory_Map(void)
* Set up the variables that describe the cache line sizes * Set up the variables that describe the cache line sizes
* for this machine. * for this machine.
*/ */
static void __init setup_iSeries_cache_sizes(void) static void __init setup_iSeries_cache_sizes(void)
{ {
unsigned int i, n; unsigned int i, n;
unsigned int procIx = get_paca()->xLpPaca.xDynHvPhysicalProcIndex; unsigned int procIx = get_paca()->xLpPaca.xDynHvPhysicalProcIndex;
systemcfg->iCacheL1Size = xIoHriProcessorVpd[procIx].xInstCacheSize * 1024; systemcfg->iCacheL1Size =
systemcfg->iCacheL1LineSize = xIoHriProcessorVpd[procIx].xInstCacheOperandSize; xIoHriProcessorVpd[procIx].xInstCacheSize * 1024;
systemcfg->dCacheL1Size = xIoHriProcessorVpd[procIx].xDataL1CacheSizeKB * 1024; systemcfg->iCacheL1LineSize =
systemcfg->dCacheL1LineSize = xIoHriProcessorVpd[procIx].xDataCacheOperandSize; xIoHriProcessorVpd[procIx].xInstCacheOperandSize;
systemcfg->dCacheL1Size =
xIoHriProcessorVpd[procIx].xDataL1CacheSizeKB * 1024;
systemcfg->dCacheL1LineSize =
xIoHriProcessorVpd[procIx].xDataCacheOperandSize;
naca->iCacheL1LinesPerPage = PAGE_SIZE / systemcfg->iCacheL1LineSize; naca->iCacheL1LinesPerPage = PAGE_SIZE / systemcfg->iCacheL1LineSize;
naca->dCacheL1LinesPerPage = PAGE_SIZE / systemcfg->dCacheL1LineSize; naca->dCacheL1LinesPerPage = PAGE_SIZE / systemcfg->dCacheL1LineSize;
i = systemcfg->iCacheL1LineSize; i = systemcfg->iCacheL1LineSize;
n = 0; n = 0;
while ((i=(i/2))) ++n; while ((i = (i / 2)))
++n;
naca->iCacheL1LogLineSize = n; naca->iCacheL1LogLineSize = n;
i = systemcfg->dCacheL1LineSize; i = systemcfg->dCacheL1LineSize;
n = 0; n = 0;
while ((i=(i/2))) ++n; while ((i = (i / 2)))
++n;
naca->dCacheL1LogLineSize = n; naca->dCacheL1LogLineSize = n;
printk( "D-cache line size = %d\n", (unsigned int)systemcfg->dCacheL1LineSize); printk("D-cache line size = %d\n",
printk( "I-cache line size = %d\n", (unsigned int)systemcfg->iCacheL1LineSize); (unsigned int)systemcfg->dCacheL1LineSize);
printk("I-cache line size = %d\n",
(unsigned int)systemcfg->iCacheL1LineSize);
} }
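The halving loops above simply compute floor(log2()) of the power-of-two cache line sizes; a standalone illustration:

#include <stdio.h>

static unsigned int log_line_size(unsigned int i)
{
        unsigned int n = 0;

        while ((i = i / 2))
                ++n;
        return n;
}

int main(void)
{
        /* 128-byte line -> 7, 32-byte line -> 5 */
        printf("%u %u\n", log_line_size(128), log_line_size(32));
        return 0;
}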
/* /*
* Bolt the kernel addr space into the HPT * Create a pte. Used during initialization only.
*/ */
static void iSeries_make_pte(unsigned long va, unsigned long pa,
int mode)
{
HPTE local_hpte, rhpte;
unsigned long hash, vpn;
long slot;
vpn = va >> PAGE_SHIFT;
hash = hpt_hash(vpn, 0);
local_hpte.dw1.dword1 = pa | mode;
local_hpte.dw0.dword0 = 0;
local_hpte.dw0.dw0.avpn = va >> 23;
local_hpte.dw0.dw0.bolted = 1; /* bolted */
local_hpte.dw0.dw0.v = 1;
slot = HvCallHpt_findValid(&rhpte, vpn);
if (slot < 0) {
/* Must find space in primary group */
panic("hash_page: hpte already exists\n");
}
	HvCallHpt_addValidate(slot, 0, (HPTE *)&local_hpte);
}
/*
* Bolt the kernel addr space into the HPT
*/
static void __init iSeries_bolt_kernel(unsigned long saddr, unsigned long eaddr) static void __init iSeries_bolt_kernel(unsigned long saddr, unsigned long eaddr)
{ {
unsigned long pa; unsigned long pa;
unsigned long mode_rw = _PAGE_ACCESSED | _PAGE_COHERENT | PP_RWXX; unsigned long mode_rw = _PAGE_ACCESSED | _PAGE_COHERENT | PP_RWXX;
HPTE hpte; HPTE hpte;
	for (pa=saddr; pa < eaddr ;pa+=PAGE_SIZE) { 	for (pa = saddr; pa < eaddr; pa += PAGE_SIZE) {
unsigned long ea = (unsigned long)__va(pa); unsigned long ea = (unsigned long)__va(pa);
unsigned long vsid = get_kernel_vsid( ea ); unsigned long vsid = get_kernel_vsid(ea);
unsigned long va = ( vsid << 28 ) | ( pa & 0xfffffff ); unsigned long va = (vsid << 28) | (pa & 0xfffffff);
unsigned long vpn = va >> PAGE_SHIFT; unsigned long vpn = va >> PAGE_SHIFT;
unsigned long slot = HvCallHpt_findValid( &hpte, vpn ); unsigned long slot = HvCallHpt_findValid(&hpte, vpn);
if ( hpte.dw0.dw0.v ) {
if (hpte.dw0.dw0.v) {
/* HPTE exists, so just bolt it */ /* HPTE exists, so just bolt it */
HvCallHpt_setSwBits( slot, 0x10, 0 ); HvCallHpt_setSwBits(slot, 0x10, 0);
} else { /* And make sure the pp bits are correct */
HvCallHpt_setPp(slot, PP_RWXX);
} else
/* No HPTE exists, so create a new bolted one */ /* No HPTE exists, so create a new bolted one */
build_valid_hpte(vsid, ea, pa, NULL, mode_rw, 1); iSeries_make_pte(va, (unsigned long)__v2a(ea),
} mode_rw);
} }
} }
#endif /* CONFIG_PPC_ISERIES */
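For reference, the address arithmetic used by the bolting code above (a 256MB segment gives a 28-bit offset, the rest is the VSID) can be sketched standalone; the VSID and physical address below are made up, the real VSID comes from get_kernel_vsid():

#include <stdio.h>

#define PAGE_SHIFT 12   /* 4K pages */

int main(void)
{
        unsigned long pa   = 0x01234000UL;      /* sample physical address */
        unsigned long vsid = 0x408fUL;          /* hypothetical VSID */
        unsigned long va   = (vsid << 28) | (pa & 0x0fffffffUL);
        unsigned long vpn  = va >> PAGE_SHIFT;

        /* avpn (va >> 23) is what iSeries_make_pte() stores in dw0 */
        printf("va = %#lx, vpn = %#lx, avpn = %#lx\n", va, vpn, va >> 23);
        return 0;
}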
extern unsigned long ppc_proc_freq; extern unsigned long ppc_proc_freq;
extern unsigned long ppc_tb_freq; extern unsigned long ppc_tb_freq;
...@@ -639,10 +671,9 @@ extern unsigned long ppc_tb_freq; ...@@ -639,10 +671,9 @@ extern unsigned long ppc_tb_freq;
/* /*
* Document me. * Document me.
*/ */
void __init void __init iSeries_setup_arch(void)
iSeries_setup_arch(void)
{ {
void * eventStack; void *eventStack;
unsigned procIx = get_paca()->xLpPaca.xDynHvPhysicalProcIndex; unsigned procIx = get_paca()->xLpPaca.xDynHvPhysicalProcIndex;
/* Add an eye catcher and the systemcfg layout version number */ /* Add an eye catcher and the systemcfg layout version number */
...@@ -657,50 +688,43 @@ iSeries_setup_arch(void) ...@@ -657,50 +688,43 @@ iSeries_setup_arch(void)
* we subtract out the KERNELBASE and add in the * we subtract out the KERNELBASE and add in the
* absolute real address of the kernel load area * absolute real address of the kernel load area
*/ */
eventStack = alloc_bootmem_pages(LpEventStackSize);
eventStack = alloc_bootmem_pages( LpEventStackSize ); memset(eventStack, 0, LpEventStackSize);
memset( eventStack, 0, LpEventStackSize );
/* Invoke the hypervisor to initialize the event stack */ /* Invoke the hypervisor to initialize the event stack */
HvCallEvent_setLpEventStack(0, eventStack, LpEventStackSize);
HvCallEvent_setLpEventStack( 0, eventStack, LpEventStackSize );
/* Initialize fields in our Lp Event Queue */ /* Initialize fields in our Lp Event Queue */
xItLpQueue.xSlicEventStackPtr = (char *)eventStack; xItLpQueue.xSlicEventStackPtr = (char *)eventStack;
xItLpQueue.xSlicCurEventPtr = (char *)eventStack; xItLpQueue.xSlicCurEventPtr = (char *)eventStack;
xItLpQueue.xSlicLastValidEventPtr = (char *)eventStack + xItLpQueue.xSlicLastValidEventPtr = (char *)eventStack +
(LpEventStackSize - LpEventMaxSize); (LpEventStackSize - LpEventMaxSize);
xItLpQueue.xIndex = 0; xItLpQueue.xIndex = 0;
/* Compute processor frequency */ /* Compute processor frequency */
procFreqHz = (((1UL<<34) * 1000000) / xIoHriProcessorVpd[procIx].xProcFreq ); procFreqHz = ((1UL << 34) * 1000000) /
xIoHriProcessorVpd[procIx].xProcFreq;
procFreqMhz = procFreqHz / 1000000; procFreqMhz = procFreqHz / 1000000;
procFreqMhzHundreths = (procFreqHz/10000) - (procFreqMhz*100); procFreqMhzHundreths = (procFreqHz / 10000) - (procFreqMhz * 100);
ppc_proc_freq = procFreqHz; ppc_proc_freq = procFreqHz;
/* Compute time base frequency */ /* Compute time base frequency */
tbFreqHz = (((1UL<<32) * 1000000) / xIoHriProcessorVpd[procIx].xTimeBaseFreq ); tbFreqHz = ((1UL << 32) * 1000000) /
xIoHriProcessorVpd[procIx].xTimeBaseFreq;
tbFreqMhz = tbFreqHz / 1000000; tbFreqMhz = tbFreqHz / 1000000;
tbFreqMhzHundreths = (tbFreqHz/10000) - (tbFreqMhz*100); tbFreqMhzHundreths = (tbFreqHz / 10000) - (tbFreqMhz * 100);
ppc_tb_freq = tbFreqHz; ppc_tb_freq = tbFreqHz;
printk("Max logical processors = %d\n", printk("Max logical processors = %d\n",
itVpdAreas.xSlicMaxLogicalProcs ); itVpdAreas.xSlicMaxLogicalProcs);
printk("Max physical processors = %d\n", printk("Max physical processors = %d\n",
itVpdAreas.xSlicMaxPhysicalProcs ); itVpdAreas.xSlicMaxPhysicalProcs);
printk("Processor frequency = %lu.%02lu\n", printk("Processor frequency = %lu.%02lu\n", procFreqMhz,
procFreqMhz, procFreqMhzHundreths);
procFreqMhzHundreths ); printk("Time base frequency = %lu.%02lu\n", tbFreqMhz,
printk("Time base frequency = %lu.%02lu\n", tbFreqMhzHundreths);
tbFreqMhz,
tbFreqMhzHundreths );
systemcfg->processor = xIoHriProcessorVpd[procIx].xPVR; systemcfg->processor = xIoHriProcessorVpd[procIx].xPVR;
printk("Processor version = %x\n", systemcfg->processor); printk("Processor version = %x\n", systemcfg->processor);
} }
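The MHz/hundredths split used for the frequency printks above is plain integer arithmetic; a standalone sketch with an assumed sample frequency:

#include <stdio.h>

int main(void)
{
        unsigned long freqHz = 1100000000UL;    /* assume 1.1 GHz */
        unsigned long mhz = freqHz / 1000000;
        unsigned long hundredths = (freqHz / 10000) - (mhz * 100);

        printf("%lu.%02lu MHz\n", mhz, hundredths);     /* 1100.00 MHz */
        return 0;
}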
/* /*
...@@ -715,38 +739,27 @@ iSeries_setup_arch(void) ...@@ -715,38 +739,27 @@ iSeries_setup_arch(void)
* *
* Output(s): * Output(s):
* *buffer - Buffer with CPU data. * *buffer - Buffer with CPU data.
*
* Returns:
* The number of bytes copied into 'buffer' if OK, otherwise zero or less
* on error.
*/ */
void iSeries_setup_residual(struct seq_file *m) void iSeries_setup_residual(struct seq_file *m, int cpu_id)
{ {
seq_printf(m, "clock\t\t: %lu.%02luMhz\n", procFreqMhz,
seq_printf(m,"clock\t\t: %lu.%02luMhz\n", procFreqMhzHundreths);
procFreqMhz, procFreqMhzHundreths ); seq_printf(m, "time base\t: %lu.%02luMHz\n", tbFreqMhz,
seq_printf(m,"time base\t: %lu.%02luMHz\n", tbFreqMhzHundreths);
tbFreqMhz, tbFreqMhzHundreths ); seq_printf(m, "i-cache\t\t: %d\n", systemcfg->iCacheL1LineSize);
seq_printf(m,"i-cache\t\t: %d\n", seq_printf(m, "d-cache\t\t: %d\n", systemcfg->dCacheL1LineSize);
systemcfg->iCacheL1LineSize);
seq_printf(m,"d-cache\t\t: %d\n",
systemcfg->dCacheL1LineSize);
} }
void iSeries_get_cpuinfo(struct seq_file *m) void iSeries_get_cpuinfo(struct seq_file *m)
{ {
seq_printf(m, "machine\t\t: 64-bit iSeries Logical Partition\n");
seq_printf(m,"machine\t\t: 64-bit iSeries Logical Partition\n");
} }
/* /*
* Document me. * Document me.
* and Implement me. * and Implement me.
*/ */
int int iSeries_get_irq(struct pt_regs *regs)
iSeries_get_irq(struct pt_regs *regs)
{ {
/* -2 means ignore this interrupt */ /* -2 means ignore this interrupt */
return -2; return -2;
...@@ -755,8 +768,7 @@ iSeries_get_irq(struct pt_regs *regs) ...@@ -755,8 +768,7 @@ iSeries_get_irq(struct pt_regs *regs)
/* /*
* Document me. * Document me.
*/ */
void void iSeries_restart(char *cmd)
iSeries_restart(char *cmd)
{ {
mf_reboot(); mf_reboot();
} }
...@@ -764,8 +776,7 @@ iSeries_restart(char *cmd) ...@@ -764,8 +776,7 @@ iSeries_restart(char *cmd)
/* /*
* Document me. * Document me.
*/ */
void void iSeries_power_off(void)
iSeries_power_off(void)
{ {
mf_powerOff(); mf_powerOff();
} }
...@@ -773,8 +784,7 @@ iSeries_power_off(void) ...@@ -773,8 +784,7 @@ iSeries_power_off(void)
/* /*
* Document me. * Document me.
*/ */
void void iSeries_halt(void)
iSeries_halt(void)
{ {
mf_powerOff(); mf_powerOff();
} }
...@@ -792,24 +802,19 @@ extern void setup_default_decr(void); ...@@ -792,24 +802,19 @@ extern void setup_default_decr(void);
* and sets up the kernel timer decrementer based on that value. * and sets up the kernel timer decrementer based on that value.
* *
*/ */
void __init void __init iSeries_calibrate_decr(void)
iSeries_calibrate_decr(void)
{ {
unsigned long cyclesPerUsec; unsigned long cyclesPerUsec;
struct div_result divres; struct div_result divres;
/* Compute decrementer (and TB) frequency /* Compute decrementer (and TB) frequency in cycles/sec */
* in cycles/sec cyclesPerUsec = ppc_tb_freq / 1000000;
*/
cyclesPerUsec = ppc_tb_freq / 1000000; /* cycles / usec */ /*
* Set the amount to refresh the decrementer by. This
/* Set the amount to refresh the decrementer by. This
* is the number of decrementer ticks it takes for * is the number of decrementer ticks it takes for
* 1/HZ seconds. * 1/HZ seconds.
*/ */
tb_ticks_per_jiffy = ppc_tb_freq / HZ; tb_ticks_per_jiffy = ppc_tb_freq / HZ;
#if 0 #if 0
...@@ -824,47 +829,54 @@ iSeries_calibrate_decr(void) ...@@ -824,47 +829,54 @@ iSeries_calibrate_decr(void)
* that jiffies (and xtime) will match the time returned * that jiffies (and xtime) will match the time returned
* by do_gettimeofday. * by do_gettimeofday.
*/ */
tb_ticks_per_sec = tb_ticks_per_jiffy * HZ; tb_ticks_per_sec = tb_ticks_per_jiffy * HZ;
tb_ticks_per_usec = cyclesPerUsec; tb_ticks_per_usec = cyclesPerUsec;
tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000); tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000);
div128_by_32( 1024*1024, 0, tb_ticks_per_sec, &divres ); div128_by_32(1024 * 1024, 0, tb_ticks_per_sec, &divres);
tb_to_xs = divres.result_low; tb_to_xs = divres.result_low;
setup_default_decr(); setup_default_decr();
} }
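The decrementer numbers above follow directly from the timebase frequency; a standalone sketch with an assumed 188MHz timebase and HZ=100, values chosen purely for illustration:

#include <stdio.h>

int main(void)
{
        unsigned long tb_freq = 188000000UL;    /* assumed timebase, Hz */
        unsigned long hz = 100;                 /* assumed HZ */
        unsigned long ticks_per_jiffy = tb_freq / hz;        /* 1880000 */
        unsigned long ticks_per_usec  = tb_freq / 1000000;   /* 188 */

        printf("per jiffy %lu, per usec %lu\n",
               ticks_per_jiffy, ticks_per_usec);
        return 0;
}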
void __init void __init iSeries_progress(char * st, unsigned short code)
iSeries_progress( char * st, unsigned short code )
{ {
printk( "Progress: [%04x] - %s\n", (unsigned)code, st ); printk("Progress: [%04x] - %s\n", (unsigned)code, st);
if ( !piranha_simulator && mf_initialized ) { if (!piranha_simulator && mf_initialized) {
if (code != 0xffff) if (code != 0xffff)
mf_displayProgress( code ); mf_displayProgress(code);
else else
mf_clearSrc(); mf_clearSrc();
} }
} }
void iSeries_fixup_klimit(void) void iSeries_fixup_klimit(void)
{ {
/* Change klimit to take into account any ram disk that may be included */ /*
* Change klimit to take into account any ram disk
* that may be included
*/
if (naca->xRamDisk) if (naca->xRamDisk)
klimit = KERNELBASE + (u64)naca->xRamDisk + (naca->xRamDiskSize * PAGE_SIZE); klimit = KERNELBASE + (u64)naca->xRamDisk +
(naca->xRamDiskSize * PAGE_SIZE);
else { else {
/* No ram disk was included - check and see if there was an embedded system map */ /*
/* Change klimit to take into account any embedded system map */ * No ram disk was included - check and see if there
* was an embedded system map. Change klimit to take
* into account any embedded system map
*/
if (embedded_sysmap_end) if (embedded_sysmap_end)
klimit = KERNELBASE + ((embedded_sysmap_end+4095) & 0xfffffffffffff000); klimit = KERNELBASE + ((embedded_sysmap_end + 4095) &
0xfffffffffffff000);
} }
} }
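The klimit adjustment above rounds the embedded system map end up to the next 4K page boundary; a standalone sketch:

#include <stdio.h>
#include <stdint.h>

static uint64_t page_align_up(uint64_t x)
{
        return (x + 4095) & 0xfffffffffffff000ULL;
}

int main(void)
{
        /* 0x12345 rounds up to 0x13000 */
        printf("%#llx\n", (unsigned long long)page_align_up(0x12345ULL));
        return 0;
}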
static void iSeries_setup_dprofile(void) static void iSeries_setup_dprofile(void)
{ {
if ( dprof_buffer ) { if (dprof_buffer) {
unsigned i; unsigned i;
for (i=0; i<NR_CPUS; ++i) {
for (i = 0; i < NR_CPUS; ++i) {
paca[i].prof_shift = dprof_shift; paca[i].prof_shift = dprof_shift;
paca[i].prof_len = dprof_len-1; paca[i].prof_len = dprof_len - 1;
paca[i].prof_buffer = dprof_buffer; paca[i].prof_buffer = dprof_buffer;
paca[i].prof_stext = (unsigned *)_stext; paca[i].prof_stext = (unsigned *)_stext;
mb(); mb();
......
...@@ -19,25 +19,24 @@ ...@@ -19,25 +19,24 @@
#ifndef __ISERIES_SETUP_H__ #ifndef __ISERIES_SETUP_H__
#define __ISERIES_SETUP_H__ #define __ISERIES_SETUP_H__
extern void iSeries_init_early(void); extern void iSeries_init_early(void);
extern void iSeries_init(unsigned long r3, extern void iSeries_init(unsigned long r3, unsigned long ird_start,
unsigned long ird_start, unsigned long ird_end, unsigned long cline_start,
unsigned long ird_end, unsigned long cline_end);
unsigned long cline_start, extern void iSeries_setup_arch(void);
unsigned long cline_end); extern void iSeries_setup_residual(struct seq_file *m, int cpu_id);
extern void iSeries_setup_arch(void); extern void iSeries_get_cpuinfo(struct seq_file *m);
extern void iSeries_setup_residual(struct seq_file *m); extern void iSeries_init_IRQ(void);
extern void iSeries_get_cpuinfo(struct seq_file *m); extern void iSeries_init_irq_desc(irq_desc_t *);
extern void iSeries_init_IRQ(void); extern int iSeries_get_irq(struct pt_regs *regs);
extern int iSeries_get_irq(struct pt_regs *regs); extern void iSeries_restart(char *cmd);
extern void iSeries_restart(char *cmd); extern void iSeries_power_off(void);
extern void iSeries_power_off(void); extern void iSeries_halt(void);
extern void iSeries_halt(void); extern void iSeries_time_init(void);
extern void iSeries_time_init(void); extern void iSeries_get_boot_time(struct rtc_time *tm);
extern void iSeries_get_boot_time(struct rtc_time *tm); extern int iSeries_set_rtc_time(struct rtc_time *tm);
extern int iSeries_set_rtc_time(unsigned long now); extern void iSeries_get_rtc_time(struct rtc_time *tm);
extern unsigned long iSeries_get_rtc_time(void); extern void iSeries_calibrate_decr(void);
extern void iSeries_calibrate_decr(void); extern void iSeries_progress(char *, unsigned short);
extern void iSeries_progress( char *, unsigned short );
#endif /* __ISERIES_SETUP_H__ */ #endif /* __ISERIES_SETUP_H__ */
...@@ -70,7 +70,7 @@ static void yield_shared_processor(void) ...@@ -70,7 +70,7 @@ static void yield_shared_processor(void)
lpaca->next_jiffy_update_tb); lpaca->next_jiffy_update_tb);
lpaca->yielded = 0; /* Back to IPI's */ lpaca->yielded = 0; /* Back to IPI's */
locale_irq_enable(); local_irq_enable();
/* /*
* The decrementer stops during the yield. Force a fake * The decrementer stops during the yield. Force a fake
...@@ -89,16 +89,14 @@ int iSeries_idle(void) ...@@ -89,16 +89,14 @@ int iSeries_idle(void)
long oldval; long oldval;
unsigned long CTRL; unsigned long CTRL;
/* endless loop with no priority at all */
current->nice = 20;
current->counter = -100;
/* ensure iSeries run light will be out when idle */ /* ensure iSeries run light will be out when idle */
current->thread.flags &= ~PPC_FLAG_RUN_LIGHT; clear_thread_flag(TIF_RUN_LIGHT);
CTRL = mfspr(CTRLF); CTRL = mfspr(CTRLF);
CTRL &= ~RUNLATCH; CTRL &= ~RUNLATCH;
mtspr(CTRLT, CTRL); mtspr(CTRLT, CTRL);
#if 0
init_idle(); init_idle();
#endif
lpaca = get_paca(); lpaca = get_paca();
...@@ -106,26 +104,29 @@ int iSeries_idle(void) ...@@ -106,26 +104,29 @@ int iSeries_idle(void)
if (lpaca->xLpPaca.xSharedProc) { if (lpaca->xLpPaca.xSharedProc) {
if (ItLpQueue_isLpIntPending(lpaca->lpQueuePtr)) if (ItLpQueue_isLpIntPending(lpaca->lpQueuePtr))
process_iSeries_events(); process_iSeries_events();
if (!current->need_resched) if (!need_resched())
yield_shared_processor(); yield_shared_processor();
} else { } else {
/* Avoid an IPI by setting need_resched */ oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED);
oldval = xchg(&current->need_resched, -1);
if (!oldval) { if (!oldval) {
while(current->need_resched == -1) { set_thread_flag(TIF_POLLING_NRFLAG);
while (!need_resched()) {
HMT_medium(); HMT_medium();
if (ItLpQueue_isLpIntPending(lpaca->lpQueuePtr)) if (ItLpQueue_isLpIntPending(lpaca->lpQueuePtr))
process_iSeries_events(); process_iSeries_events();
HMT_low(); HMT_low();
} }
HMT_medium();
clear_thread_flag(TIF_POLLING_NRFLAG);
} else {
set_need_resched();
} }
} }
HMT_medium();
if (current->need_resched) { schedule();
lpaca->xLpPaca.xIdle = 0;
schedule();
check_pgt_cache();
}
} }
return 0; return 0;
} }
...@@ -158,10 +159,11 @@ int default_idle(void) ...@@ -158,10 +159,11 @@ int default_idle(void)
return 0; return 0;
} }
#ifdef CONFIG_PPC_PSERIES
int dedicated_idle(void) int dedicated_idle(void)
{ {
long oldval; long oldval;
struct paca_struct *lpaca = get_paca(), *ppaca;; struct paca_struct *lpaca = get_paca(), *ppaca;
unsigned long start_snooze; unsigned long start_snooze;
ppaca = &paca[(lpaca->xPacaIndex) ^ 1]; ppaca = &paca[(lpaca->xPacaIndex) ^ 1];
...@@ -274,6 +276,7 @@ int shared_idle(void) ...@@ -274,6 +276,7 @@ int shared_idle(void)
return 0; return 0;
} }
#endif
int cpu_idle(void) int cpu_idle(void)
{ {
......
...@@ -59,7 +59,6 @@ ...@@ -59,7 +59,6 @@
extern void iSeries_smp_message_recv( struct pt_regs * ); extern void iSeries_smp_message_recv( struct pt_regs * );
#endif #endif
volatile unsigned char *chrp_int_ack_special;
static void register_irq_proc (unsigned int irq); static void register_irq_proc (unsigned int irq);
irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned = { irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned = {
...@@ -561,17 +560,14 @@ void ppc_irq_dispatch_handler(struct pt_regs *regs, int irq) ...@@ -561,17 +560,14 @@ void ppc_irq_dispatch_handler(struct pt_regs *regs, int irq)
spin_unlock(&desc->lock); spin_unlock(&desc->lock);
} }
#ifdef CONFIG_PPC_ISERIES
int do_IRQ(struct pt_regs *regs) int do_IRQ(struct pt_regs *regs)
{ {
int irq, first = 1;
#ifdef CONFIG_PPC_ISERIES
struct paca_struct *lpaca; struct paca_struct *lpaca;
struct ItLpQueue *lpq; struct ItLpQueue *lpq;
#endif
irq_enter(); irq_enter();
#ifdef CONFIG_PPC_ISERIES
lpaca = get_paca(); lpaca = get_paca();
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
if (lpaca->xLpPaca.xIntDword.xFields.xIpiCnt) { if (lpaca->xLpPaca.xIntDword.xFields.xIpiCnt) {
...@@ -582,7 +578,24 @@ int do_IRQ(struct pt_regs *regs) ...@@ -582,7 +578,24 @@ int do_IRQ(struct pt_regs *regs)
lpq = lpaca->lpQueuePtr; lpq = lpaca->lpQueuePtr;
if (lpq && ItLpQueue_isLpIntPending(lpq)) if (lpq && ItLpQueue_isLpIntPending(lpq))
lpEvent_count += ItLpQueue_process(lpq, regs); lpEvent_count += ItLpQueue_process(lpq, regs);
#else
irq_exit();
if (lpaca->xLpPaca.xIntDword.xFields.xDecrInt) {
lpaca->xLpPaca.xIntDword.xFields.xDecrInt = 0;
/* Signal a fake decrementer interrupt */
timer_interrupt(regs);
}
return 1; /* lets ret_from_int know we can do checks */
}
#else /* CONFIG_PPC_ISERIES */
int do_IRQ(struct pt_regs *regs)
{
int irq, first = 1;
irq_enter();
/* /*
* Every arch is required to implement ppc_md.get_irq. * Every arch is required to implement ppc_md.get_irq.
* This function will either return an irq number or -1 to * This function will either return an irq number or -1 to
...@@ -598,20 +611,12 @@ int do_IRQ(struct pt_regs *regs) ...@@ -598,20 +611,12 @@ int do_IRQ(struct pt_regs *regs)
if (irq != -2 && first) if (irq != -2 && first)
/* That's not SMP safe ... but who cares ? */ /* That's not SMP safe ... but who cares ? */
ppc_spurious_interrupts++; ppc_spurious_interrupts++;
#endif
irq_exit(); irq_exit();
#ifdef CONFIG_PPC_ISERIES
if (lpaca->xLpPaca.xIntDword.xFields.xDecrInt) {
lpaca->xLpPaca.xIntDword.xFields.xDecrInt = 0;
/* Signal a fake decrementer interrupt */
timer_interrupt(regs);
}
#endif
return 1; /* lets ret_from_int know we can do checks */ return 1; /* lets ret_from_int know we can do checks */
} }
#endif /* CONFIG_PPC_ISERIES */
unsigned long probe_irq_on (void) unsigned long probe_irq_on (void)
{ {
...@@ -636,10 +641,10 @@ void __init init_IRQ(void) ...@@ -636,10 +641,10 @@ void __init init_IRQ(void)
{ {
static int once = 0; static int once = 0;
if ( once ) if (once)
return; return;
else
once++; once++;
ppc_md.init_IRQ(); ppc_md.init_IRQ();
} }
......
...@@ -42,160 +42,120 @@ ...@@ -42,160 +42,120 @@
#include <linux/pci.h> #include <linux/pci.h>
#include <linux/bcd.h> #include <linux/bcd.h>
extern struct pci_dev * iSeries_vio_dev; extern struct pci_dev *iSeries_vio_dev;
/* /*
* This is the structure layout for the Machine Facilities LPAR event * flows.
* flows. * flows.
*/ */
struct VspCmdData; union safe_cast {
struct CeMsgData; u64 ptr_as_u64;
union SafeCast
{
u64 ptrAsU64;
void *ptr; void *ptr;
}; };
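The safe_cast union exists to carry a kernel pointer in the 64-bit correlation token of an LP event and recover it unchanged on the ack path; a minimal standalone illustration (64-bit pointers assumed, as on ppc64):

#include <stdio.h>
#include <stdint.h>

union safe_cast {
        uint64_t ptr_as_u64;
        void *ptr;
};

int main(void)
{
        int payload = 42;
        union safe_cast out, in;

        out.ptr = &payload;                     /* pointer goes out as a token */
        in.ptr_as_u64 = out.ptr_as_u64;         /* token comes back in the ack */
        printf("%d\n", *(int *)in.ptr);         /* 42 */
        return 0;
}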
struct VspCmdData {
union safe_cast token;
u16 cmd;
HvLpIndex lp_index;
u8 result_code;
u32 reserved;
union {
u64 state; /* GetStateOut */
u64 ipl_type; /* GetIplTypeOut, Function02SelectIplTypeIn */
u64 ipl_mode; /* GetIplModeOut, Function02SelectIplModeIn */
u64 page[4]; /* GetSrcHistoryIn */
u64 flag; /* GetAutoIplWhenPrimaryIplsOut,
SetAutoIplWhenPrimaryIplsIn,
WhiteButtonPowerOffIn,
Function08FastPowerOffIn,
IsSpcnRackPowerIncompleteOut */
struct {
u64 token;
u64 address_type;
u64 side;
u32 length;
u32 offset;
} kern; /* SetKernelImageIn, GetKernelImageIn,
SetKernelCmdLineIn, GetKernelCmdLineIn */
u32 length_out; /* GetKernelImageOut, GetKernelCmdLineOut */
u8 reserved[80];
} sub_data;
};
typedef void (*CeMsgCompleteHandler)( void *token, struct CeMsgData *vspCmdRsp ); struct VspRspData {
struct semaphore *sem;
struct VspCmdData *response;
};
struct CeMsgCompleteData struct AllocData {
{ u16 size;
CeMsgCompleteHandler xHdlr; u16 type;
void *xToken; u32 count;
u16 reserved1;
u8 reserved2;
HvLpIndex target_lp;
}; };
struct VspRspData struct CeMsgData;
{
struct semaphore *xSemaphore; typedef void (*CeMsgCompleteHandler)(void *token, struct CeMsgData *vspCmdRsp);
struct VspCmdData *xResponse;
struct CeMsgCompleteData {
CeMsgCompleteHandler handler;
void *token;
}; };
struct IoMFLpEvent struct CeMsgData {
{ u8 ce_msg[12];
struct HvLpEvent xHvLpEvent; char reserved[4];
struct CeMsgCompleteData *completion;
u16 xSubtypeRc; };
u16 xRsvd1;
u32 xRsvd2; struct IoMFLpEvent {
struct HvLpEvent hp_lp_event;
union u16 subtype_result_code;
{ u16 reserved1;
u32 reserved2;
struct AllocData union {
{ struct AllocData alloc;
u16 xSize; struct CeMsgData ce_msg;
u16 xType; struct VspCmdData vsp_cmd;
u32 xCount; } data;
u16 xRsvd3;
u8 xRsvd4;
HvLpIndex xTargetLp;
} xAllocData;
struct CeMsgData
{
u8 xCEMsg[12];
char xReserved[4];
struct CeMsgCompleteData *xToken;
} xCEMsgData;
struct VspCmdData
{
union SafeCast xTokenUnion;
u16 xCmd;
HvLpIndex xLpIndex;
u8 xRc;
u32 xReserved1;
union VspCmdSubData
{
struct
{
u64 xState;
} xGetStateOut;
struct
{
u64 xIplType;
} xGetIplTypeOut, xFunction02SelectIplTypeIn;
struct
{
u64 xIplMode;
} xGetIplModeOut, xFunction02SelectIplModeIn;
struct
{
u64 xPage[4];
} xGetSrcHistoryIn;
struct
{
u64 xFlag;
} xGetAutoIplWhenPrimaryIplsOut,
xSetAutoIplWhenPrimaryIplsIn,
xWhiteButtonPowerOffIn,
xFunction08FastPowerOffIn,
xIsSpcnRackPowerIncompleteOut;
struct
{
u64 xToken;
u64 xAddressType;
u64 xSide;
u32 xTransferLength;
u32 xOffset;
} xSetKernelImageIn,
xGetKernelImageIn,
xSetKernelCmdLineIn,
xGetKernelCmdLineIn;
struct
{
u32 xTransferLength;
} xGetKernelImageOut,xGetKernelCmdLineOut;
u8 xReserved2[80];
} xSubData;
} xVspCmd;
} xUnion;
}; };
#define subtype_data(a, b, c, d) \
(((a) << 24) + ((b) << 16) + ((c) << 8) + (d))
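The subtype_data() macro just packs four ASCII bytes into one 32-bit word, e.g. subtype_data('M', 'F', 'V', 'I') is 0x4D465649; a standalone check:

#include <stdio.h>

#define subtype_data(a, b, c, d) \
        (((a) << 24) + ((b) << 16) + ((c) << 8) + (d))

int main(void)
{
        printf("%#x\n", subtype_data('M', 'F', 'V', 'I'));     /* 0x4d465649 */
        return 0;
}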
/* /*
* All outgoing event traffic is kept on a FIFO queue. The first * All outgoing event traffic is kept on a FIFO queue. The first
* pointer points to the one that is outstanding, and all new * pointer points to the one that is outstanding, and all new
* requests get stuck on the end. Also, we keep a certain number of * requests get stuck on the end. Also, we keep a certain number of
* preallocated stack elements so that we can operate very early in * preallocated pending events so that we can operate very early in
* the boot up sequence (before kmalloc is ready). * the boot up sequence (before kmalloc is ready).
*/ */
struct StackElement struct pending_event {
{ struct pending_event *next;
struct StackElement * next;
struct IoMFLpEvent event; struct IoMFLpEvent event;
MFCompleteHandler hdlr; MFCompleteHandler hdlr;
char dmaData[72]; char dma_data[72];
unsigned dmaDataLength; unsigned dma_data_length;
unsigned remoteAddress; unsigned remote_address;
}; };
static spinlock_t spinlock; static spinlock_t pending_event_spinlock;
static struct StackElement * head = NULL; static struct pending_event *pending_event_head;
static struct StackElement * tail = NULL; static struct pending_event *pending_event_tail;
static struct StackElement * avail = NULL; static struct pending_event *pending_event_avail;
static struct StackElement prealloc[16]; static struct pending_event pending_event_prealloc[16];
/* /*
* Put a stack element onto the available queue, so it can get reused. * Put a pending event onto the available queue, so it can get reused.
* Attention! You must have the spinlock before calling! * Attention! You must have the pending_event_spinlock before calling!
*/ */
void free( struct StackElement * element ) static void free_pending_event(struct pending_event *ev)
{ {
if ( element != NULL ) if (ev != NULL) {
{ ev->next = pending_event_avail;
element->next = avail; pending_event_avail = ev;
avail = element;
} }
} }
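The preallocated array plus avail list lets mf.c queue events before kmalloc is usable; new_pending_event() pops from that list first and only then falls back to kmalloc. A rough userspace sketch of the same pattern, with illustrative names and sizes rather than the kernel's:

#include <stdio.h>
#include <stdlib.h>

struct item {
        struct item *next;
};

static struct item prealloc[4];
static struct item *avail;

static void seed_pool(void)
{
        unsigned i;

        for (i = 0; i < sizeof(prealloc) / sizeof(*prealloc); ++i) {
                prealloc[i].next = avail;
                avail = &prealloc[i];
        }
}

static struct item *get_item(void)
{
        struct item *it = avail;

        if (it)
                avail = it->next;               /* pop from the static pool */
        else
                it = malloc(sizeof(*it));       /* fallback once pool is empty */
        return it;
}

int main(void)
{
        int i;

        seed_pool();
        for (i = 0; i < 6; ++i)
                printf("allocation %d: %s\n", i,
                       get_item() ? "ok" : "failed");
        return 0;
}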
...@@ -203,68 +163,68 @@ void free( struct StackElement * element ) ...@@ -203,68 +163,68 @@ void free( struct StackElement * element )
* Enqueue the outbound event onto the stack. If the queue was * Enqueue the outbound event onto the stack. If the queue was
* empty to begin with, we must also issue it via the Hypervisor * empty to begin with, we must also issue it via the Hypervisor
* interface. There is a section of code below that will touch * interface. There is a section of code below that will touch
* the first stack pointer without the protection of the spinlock. * the first stack pointer without the protection of the pending_event_spinlock.
* This is OK, because we know that nobody else will be modifying * This is OK, because we know that nobody else will be modifying
* the first pointer when we do this. * the first pointer when we do this.
*/ */
static int signalEvent( struct StackElement * newElement ) static int signal_event(struct pending_event *ev)
{ {
int rc = 0; int rc = 0;
unsigned long flags; unsigned long flags;
int go = 1; int go = 1;
struct StackElement * element; struct pending_event *ev1;
HvLpEvent_Rc hvRc; HvLpEvent_Rc hvRc;
/* enqueue the event */ /* enqueue the event */
if ( newElement != NULL ) if (ev != NULL) {
{ ev->next = NULL;
spin_lock_irqsave( &spinlock, flags ); spin_lock_irqsave(&pending_event_spinlock, flags);
if ( head == NULL ) if (pending_event_head == NULL)
head = newElement; pending_event_head = ev;
else { else {
go = 0; go = 0;
tail->next = newElement; pending_event_tail->next = ev;
} }
newElement->next = NULL; pending_event_tail = ev;
tail = newElement; spin_unlock_irqrestore(&pending_event_spinlock, flags);
spin_unlock_irqrestore( &spinlock, flags );
} }
/* send the event */ /* send the event */
while ( go ) while (go) {
{
go = 0; go = 0;
/* any DMA data to send beforehand? */ /* any DMA data to send beforehand? */
if ( head->dmaDataLength > 0 ) if (pending_event_head->dma_data_length > 0)
HvCallEvent_dmaToSp( head->dmaData, head->remoteAddress, head->dmaDataLength, HvLpDma_Direction_LocalToRemote ); HvCallEvent_dmaToSp(pending_event_head->dma_data,
pending_event_head->remote_address,
hvRc = HvCallEvent_signalLpEvent(&head->event.xHvLpEvent); pending_event_head->dma_data_length,
if ( hvRc != HvLpEvent_Rc_Good ) HvLpDma_Direction_LocalToRemote);
{
printk( KERN_ERR "mf.c: HvCallEvent_signalLpEvent() failed with %d\n", (int)hvRc ); hvRc = HvCallEvent_signalLpEvent(
&pending_event_head->event.hp_lp_event);
spin_lock_irqsave( &spinlock, flags ); if (hvRc != HvLpEvent_Rc_Good) {
element = head; printk(KERN_ERR "mf.c: HvCallEvent_signalLpEvent() failed with %d\n",
head = head->next; (int)hvRc);
if ( head != NULL )
spin_lock_irqsave(&pending_event_spinlock, flags);
ev1 = pending_event_head;
pending_event_head = pending_event_head->next;
if (pending_event_head != NULL)
go = 1; go = 1;
spin_unlock_irqrestore( &spinlock, flags ); spin_unlock_irqrestore(&pending_event_spinlock, flags);
if ( element == newElement ) if (ev1 == ev)
rc = -EIO; rc = -EIO;
else { else if (ev1->hdlr != NULL) {
if ( element->hdlr != NULL ) union safe_cast mySafeCast;
{
union SafeCast mySafeCast; mySafeCast.ptr_as_u64 = ev1->event.hp_lp_event.xCorrelationToken;
mySafeCast.ptrAsU64 = element->event.xHvLpEvent.xCorrelationToken; (*ev1->hdlr)(mySafeCast.ptr, -EIO);
(*element->hdlr)( mySafeCast.ptr, -EIO );
}
} }
spin_lock_irqsave( &spinlock, flags ); spin_lock_irqsave(&pending_event_spinlock, flags);
free( element ); free_pending_event(ev1);
spin_unlock_irqrestore( &spinlock, flags ); spin_unlock_irqrestore(&pending_event_spinlock, flags);
} }
} }
...@@ -272,80 +232,74 @@ static int signalEvent( struct StackElement * newElement ) ...@@ -272,80 +232,74 @@ static int signalEvent( struct StackElement * newElement )
} }
/* /*
* Allocate a new StackElement structure, and initialize it. * Allocate a new pending_event structure, and initialize it.
*/ */
static struct StackElement * newStackElement( void ) static struct pending_event *new_pending_event(void)
{ {
struct StackElement * newElement = NULL; struct pending_event *ev = NULL;
HvLpIndex primaryLp = HvLpConfig_getPrimaryLpIndex(); HvLpIndex primaryLp = HvLpConfig_getPrimaryLpIndex();
unsigned long flags; unsigned long flags;
struct HvLpEvent *hev;
if ( newElement == NULL ) spin_lock_irqsave(&pending_event_spinlock, flags);
{ if (pending_event_avail != NULL) {
spin_lock_irqsave( &spinlock, flags ); ev = pending_event_avail;
if ( avail != NULL ) pending_event_avail = pending_event_avail->next;
{
newElement = avail;
avail = avail->next;
}
spin_unlock_irqrestore( &spinlock, flags );
} }
spin_unlock_irqrestore(&pending_event_spinlock, flags);
if ( newElement == NULL ) if (ev == NULL)
newElement = kmalloc(sizeof(struct StackElement),GFP_ATOMIC); ev = kmalloc(sizeof(struct pending_event),GFP_ATOMIC);
if (ev == NULL) {
if ( newElement == NULL ) printk(KERN_ERR "mf.c: unable to kmalloc %ld bytes\n",
{ sizeof(struct pending_event));
printk( KERN_ERR "mf.c: unable to kmalloc %ld bytes\n", sizeof(struct StackElement) );
return NULL; return NULL;
} }
memset(ev, 0, sizeof(struct pending_event));
memset( newElement, 0, sizeof(struct StackElement) ); hev = &ev->event.hp_lp_event;
newElement->event.xHvLpEvent.xFlags.xValid = 1; hev->xFlags.xValid = 1;
newElement->event.xHvLpEvent.xFlags.xAckType = HvLpEvent_AckType_ImmediateAck; hev->xFlags.xAckType = HvLpEvent_AckType_ImmediateAck;
newElement->event.xHvLpEvent.xFlags.xAckInd = HvLpEvent_AckInd_DoAck; hev->xFlags.xAckInd = HvLpEvent_AckInd_DoAck;
newElement->event.xHvLpEvent.xFlags.xFunction = HvLpEvent_Function_Int; hev->xFlags.xFunction = HvLpEvent_Function_Int;
newElement->event.xHvLpEvent.xType = HvLpEvent_Type_MachineFac; hev->xType = HvLpEvent_Type_MachineFac;
newElement->event.xHvLpEvent.xSourceLp = HvLpConfig_getLpIndex(); hev->xSourceLp = HvLpConfig_getLpIndex();
newElement->event.xHvLpEvent.xTargetLp = primaryLp; hev->xTargetLp = primaryLp;
newElement->event.xHvLpEvent.xSizeMinus1 = sizeof(newElement->event)-1; hev->xSizeMinus1 = sizeof(ev->event)-1;
newElement->event.xHvLpEvent.xRc = HvLpEvent_Rc_Good; hev->xRc = HvLpEvent_Rc_Good;
newElement->event.xHvLpEvent.xSourceInstanceId = HvCallEvent_getSourceLpInstanceId(primaryLp,HvLpEvent_Type_MachineFac); hev->xSourceInstanceId = HvCallEvent_getSourceLpInstanceId(primaryLp,
newElement->event.xHvLpEvent.xTargetInstanceId = HvCallEvent_getTargetLpInstanceId(primaryLp,HvLpEvent_Type_MachineFac); HvLpEvent_Type_MachineFac);
hev->xTargetInstanceId = HvCallEvent_getTargetLpInstanceId(primaryLp,
return newElement; HvLpEvent_Type_MachineFac);
return ev;
} }
static int signalVspInstruction( struct VspCmdData *vspCmd ) static int signal_vsp_instruction(struct VspCmdData *vspCmd)
{ {
struct StackElement * newElement = newStackElement(); struct pending_event *ev = new_pending_event();
int rc = 0; int rc;
struct VspRspData response; struct VspRspData response;
DECLARE_MUTEX_LOCKED(Semaphore); DECLARE_MUTEX_LOCKED(Semaphore);
response.xSemaphore = &Semaphore;
response.xResponse = vspCmd;
if ( newElement == NULL ) if (ev == NULL)
rc = -ENOMEM; return -ENOMEM;
else {
newElement->event.xHvLpEvent.xSubtype = 6;
newElement->event.xHvLpEvent.x.xSubtypeData = ('M'<<24)+('F'<<16)+('V'<<8)+('I'<<0);
newElement->event.xUnion.xVspCmd.xTokenUnion.ptr = &response;
newElement->event.xUnion.xVspCmd.xCmd = vspCmd->xCmd;
newElement->event.xUnion.xVspCmd.xLpIndex = HvLpConfig_getLpIndex();
newElement->event.xUnion.xVspCmd.xRc = 0xFF;
newElement->event.xUnion.xVspCmd.xReserved1 = 0;
memcpy(&(newElement->event.xUnion.xVspCmd.xSubData),&(vspCmd->xSubData), sizeof(vspCmd->xSubData));
mb();
rc = signalEvent(newElement);
}
response.sem = &Semaphore;
response.response = vspCmd;
ev->event.hp_lp_event.xSubtype = 6;
ev->event.hp_lp_event.x.xSubtypeData =
subtype_data('M', 'F', 'V', 'I');
ev->event.data.vsp_cmd.token.ptr = &response;
ev->event.data.vsp_cmd.cmd = vspCmd->cmd;
ev->event.data.vsp_cmd.lp_index = HvLpConfig_getLpIndex();
ev->event.data.vsp_cmd.result_code = 0xFF;
ev->event.data.vsp_cmd.reserved = 0;
memcpy(&(ev->event.data.vsp_cmd.sub_data),
&(vspCmd->sub_data), sizeof(vspCmd->sub_data));
mb();
rc = signal_event(ev);
if (rc == 0) if (rc == 0)
{
down(&Semaphore); down(&Semaphore);
}
return rc; return rc;
} }
...@@ -353,46 +307,42 @@ static int signalVspInstruction( struct VspCmdData *vspCmd ) ...@@ -353,46 +307,42 @@ static int signalVspInstruction( struct VspCmdData *vspCmd )
/* /*
* Send a 12-byte CE message to the primary partition VSP object * Send a 12-byte CE message to the primary partition VSP object
*/ */
static int signalCEMsg( char * ceMsg, void * token ) static int signal_ce_msg(char *ce_msg, struct CeMsgCompleteData *completion)
{ {
struct StackElement * newElement = newStackElement(); struct pending_event *ev = new_pending_event();
int rc = 0;
if ( newElement == NULL ) if (ev == NULL)
rc = -ENOMEM; return -ENOMEM;
else {
newElement->event.xHvLpEvent.xSubtype = 0;
newElement->event.xHvLpEvent.x.xSubtypeData = ('M'<<24)+('F'<<16)+('C'<<8)+('E'<<0);
memcpy( newElement->event.xUnion.xCEMsgData.xCEMsg, ceMsg, 12 );
newElement->event.xUnion.xCEMsgData.xToken = token;
rc = signalEvent(newElement);
}
return rc; ev->event.hp_lp_event.xSubtype = 0;
ev->event.hp_lp_event.x.xSubtypeData =
subtype_data('M', 'F', 'C', 'E');
memcpy(ev->event.data.ce_msg.ce_msg, ce_msg, 12);
ev->event.data.ce_msg.completion = completion;
return signal_event(ev);
} }
/* /*
* Send a 12-byte CE message and DMA data to the primary partition VSP object * Send a 12-byte CE message and DMA data to the primary partition VSP object
*/ */
static int dmaAndSignalCEMsg( char * ceMsg, void * token, void * dmaData, unsigned dmaDataLength, unsigned remoteAddress ) static int dma_and_signal_ce_msg(char *ce_msg,
struct CeMsgCompleteData *completion, void *dma_data,
unsigned dma_data_length, unsigned remote_address)
{ {
struct StackElement * newElement = newStackElement(); struct pending_event *ev = new_pending_event();
int rc = 0;
if ( newElement == NULL ) if (ev == NULL)
rc = -ENOMEM; return -ENOMEM;
else {
newElement->event.xHvLpEvent.xSubtype = 0;
newElement->event.xHvLpEvent.x.xSubtypeData = ('M'<<24)+('F'<<16)+('C'<<8)+('E'<<0);
memcpy( newElement->event.xUnion.xCEMsgData.xCEMsg, ceMsg, 12 );
newElement->event.xUnion.xCEMsgData.xToken = token;
memcpy( newElement->dmaData, dmaData, dmaDataLength );
newElement->dmaDataLength = dmaDataLength;
newElement->remoteAddress = remoteAddress;
rc = signalEvent(newElement);
}
return rc; ev->event.hp_lp_event.xSubtype = 0;
ev->event.hp_lp_event.x.xSubtypeData =
subtype_data('M', 'F', 'C', 'E');
memcpy(ev->event.data.ce_msg.ce_msg, ce_msg, 12);
ev->event.data.ce_msg.completion = completion;
memcpy(ev->dma_data, dma_data, dma_data_length);
ev->dma_data_length = dma_data_length;
ev->remote_address = remote_address;
return signal_event(ev);
} }
/* /*
...@@ -401,18 +351,17 @@ static int dmaAndSignalCEMsg( char * ceMsg, void * token, void * dmaData, unsign ...@@ -401,18 +351,17 @@ static int dmaAndSignalCEMsg( char * ceMsg, void * token, void * dmaData, unsign
* this fails (why?), we'll simply force it off in a not-so-nice * this fails (why?), we'll simply force it off in a not-so-nice
* manner. * manner.
*/ */
static int shutdown( void ) static int shutdown(void)
{ {
int rc = kill_proc(1,SIGINT,1); int rc = kill_proc(1, SIGINT, 1);
if ( rc ) if (rc) {
{ printk(KERN_ALERT "mf.c: SIGINT to init failed (%d), "
printk( KERN_ALERT "mf.c: SIGINT to init failed (%d), hard shutdown commencing\n", rc ); "hard shutdown commencing\n", rc);
mf_powerOff(); mf_powerOff();
} } else
else printk(KERN_INFO "mf.c: init has been successfully notified "
printk( KERN_INFO "mf.c: init has been successfully notified to proceed with shutdown\n" ); "to proceed with shutdown\n");
return rc; return rc;
} }
...@@ -420,67 +369,64 @@ static int shutdown( void ) ...@@ -420,67 +369,64 @@ static int shutdown( void )
* The primary partition VSP object is sending us a new * The primary partition VSP object is sending us a new
* event flow. Handle it... * event flow. Handle it...
*/ */
static void intReceived( struct IoMFLpEvent * event ) static void intReceived(struct IoMFLpEvent *event)
{ {
int freeIt = 0; int freeIt = 0;
struct StackElement * two = NULL; struct pending_event *two = NULL;
/* ack the interrupt */ /* ack the interrupt */
event->xHvLpEvent.xRc = HvLpEvent_Rc_Good; event->hp_lp_event.xRc = HvLpEvent_Rc_Good;
HvCallEvent_ackLpEvent( &event->xHvLpEvent ); HvCallEvent_ackLpEvent(&event->hp_lp_event);
/* process interrupt */ /* process interrupt */
switch( event->xHvLpEvent.xSubtype ) switch (event->hp_lp_event.xSubtype) {
{
case 0: /* CE message */ case 0: /* CE message */
switch( event->xUnion.xCEMsgData.xCEMsg[3] ) switch (event->data.ce_msg.ce_msg[3]) {
{
case 0x5B: /* power control notification */ case 0x5B: /* power control notification */
if ( (event->xUnion.xCEMsgData.xCEMsg[5]&0x20) != 0 ) if ((event->data.ce_msg.ce_msg[5] & 0x20) != 0) {
{ printk(KERN_INFO "mf.c: Commencing partition shutdown\n");
printk( KERN_INFO "mf.c: Commencing partition shutdown\n" ); if (shutdown() == 0)
if ( shutdown() == 0 ) signal_ce_msg("\x00\x00\x00\xDB\x00\x00\x00\x00\x00\x00\x00\x00", NULL);
signalCEMsg( "\x00\x00\x00\xDB\x00\x00\x00\x00\x00\x00\x00\x00", NULL );
} }
break; break;
case 0xC0: /* get time */ case 0xC0: /* get time */
{ if ((pending_event_head == NULL) ||
if ( (head != NULL) && ( head->event.xUnion.xCEMsgData.xCEMsg[3] == 0x40 ) ) (pending_event_head->event.data.ce_msg.ce_msg[3]
{ != 0x40))
freeIt = 1; break;
if ( head->event.xUnion.xCEMsgData.xToken != 0 ) freeIt = 1;
{ if (pending_event_head->event.data.ce_msg.completion != 0) {
CeMsgCompleteHandler xHdlr = head->event.xUnion.xCEMsgData.xToken->xHdlr; CeMsgCompleteHandler handler = pending_event_head->event.data.ce_msg.completion->handler;
void * token = head->event.xUnion.xCEMsgData.xToken->xToken; void *token = pending_event_head->event.data.ce_msg.completion->token;
if (xHdlr != NULL) if (handler != NULL)
(*xHdlr)( token, &(event->xUnion.xCEMsgData) ); (*handler)(token, &(event->data.ce_msg));
}
}
} }
break; break;
} }
/* remove from queue */ /* remove from queue */
if ( freeIt == 1 ) if (freeIt == 1) {
{
unsigned long flags; unsigned long flags;
spin_lock_irqsave( &spinlock, flags );
if ( head != NULL ) spin_lock_irqsave(&pending_event_spinlock, flags);
{ if (pending_event_head != NULL) {
struct StackElement *oldHead = head; struct pending_event *oldHead =
head = head->next; pending_event_head;
two = head;
free( oldHead ); pending_event_head = pending_event_head->next;
two = pending_event_head;
free_pending_event(oldHead);
} }
spin_unlock_irqrestore( &spinlock, flags ); spin_unlock_irqrestore(&pending_event_spinlock, flags);
} }
/* send next waiting event */ /* send next waiting event */
if ( two != NULL ) if (two != NULL)
signalEvent( NULL ); signal_event(NULL);
break; break;
case 1: /* IT sys shutdown */ case 1: /* IT sys shutdown */
printk( KERN_INFO "mf.c: Commencing system shutdown\n" ); printk(KERN_INFO "mf.c: Commencing system shutdown\n");
shutdown(); shutdown();
break; break;
} }
...@@ -491,81 +437,74 @@ static void intReceived( struct IoMFLpEvent * event ) ...@@ -491,81 +437,74 @@ static void intReceived( struct IoMFLpEvent * event )
* of a flow we sent to them. If there are other flows queued * of a flow we sent to them. If there are other flows queued
* up, we must send another one now... * up, we must send another one now...
*/ */
static void ackReceived( struct IoMFLpEvent * event ) static void ackReceived(struct IoMFLpEvent *event)
{ {
unsigned long flags; unsigned long flags;
struct StackElement * two = NULL; struct pending_event * two = NULL;
unsigned long freeIt = 0; unsigned long freeIt = 0;
/* handle current event */ /* handle current event */
if ( head != NULL ) if (pending_event_head != NULL) {
{ switch (event->hp_lp_event.xSubtype) {
switch( event->xHvLpEvent.xSubtype )
{
case 0: /* CE msg */ case 0: /* CE msg */
if ( event->xUnion.xCEMsgData.xCEMsg[3] == 0x40 ) if (event->data.ce_msg.ce_msg[3] == 0x40) {
{ if (event->data.ce_msg.ce_msg[2] != 0) {
if ( event->xUnion.xCEMsgData.xCEMsg[2] != 0 )
{
freeIt = 1; freeIt = 1;
if ( head->event.xUnion.xCEMsgData.xToken != 0 ) if (pending_event_head->event.data.ce_msg.completion
{ != 0) {
CeMsgCompleteHandler xHdlr = head->event.xUnion.xCEMsgData.xToken->xHdlr; CeMsgCompleteHandler handler = pending_event_head->event.data.ce_msg.completion->handler;
void * token = head->event.xUnion.xCEMsgData.xToken->xToken; void *token = pending_event_head->event.data.ce_msg.completion->token;
if (xHdlr != NULL) if (handler != NULL)
(*xHdlr)( token, &(event->xUnion.xCEMsgData) ); (*handler)(token, &(event->data.ce_msg));
} }
} }
} else { } else
freeIt = 1; freeIt = 1;
}
break; break;
case 4: /* allocate */ case 4: /* allocate */
case 5: /* deallocate */ case 5: /* deallocate */
if ( head->hdlr != NULL ) if (pending_event_head->hdlr != NULL) {
{ union safe_cast mySafeCast;
union SafeCast mySafeCast;
mySafeCast.ptrAsU64 = event->xHvLpEvent.xCorrelationToken; mySafeCast.ptr_as_u64 = event->hp_lp_event.xCorrelationToken;
(*head->hdlr)( mySafeCast.ptr, event->xUnion.xAllocData.xCount ); (*pending_event_head->hdlr)(mySafeCast.ptr, event->data.alloc.count);
} }
freeIt = 1; freeIt = 1;
break; break;
case 6: case 6:
{ {
struct VspRspData *rsp = (struct VspRspData *)event->xUnion.xVspCmd.xTokenUnion.ptr; struct VspRspData *rsp = (struct VspRspData *)event->data.vsp_cmd.token.ptr;
if (rsp != NULL) if (rsp != NULL) {
{ if (rsp->response != NULL)
if (rsp->xResponse != NULL) memcpy(rsp->response, &(event->data.vsp_cmd), sizeof(event->data.vsp_cmd));
memcpy(rsp->xResponse, &(event->xUnion.xVspCmd), sizeof(event->xUnion.xVspCmd)); if (rsp->sem != NULL)
if (rsp->xSemaphore != NULL) up(rsp->sem);
up(rsp->xSemaphore); } else
} else { printk(KERN_ERR "mf.c: no rsp\n");
printk( KERN_ERR "mf.c: no rsp\n");
}
freeIt = 1; freeIt = 1;
} }
break; break;
} }
} }
else else
printk( KERN_ERR "mf.c: stack empty for receiving ack\n" ); printk(KERN_ERR "mf.c: stack empty for receiving ack\n");
/* remove from queue */ /* remove from queue */
spin_lock_irqsave( &spinlock, flags ); spin_lock_irqsave(&pending_event_spinlock, flags);
if (( head != NULL ) && ( freeIt == 1 )) if ((pending_event_head != NULL) && (freeIt == 1)) {
{ struct pending_event *oldHead = pending_event_head;
struct StackElement *oldHead = head;
head = head->next; pending_event_head = pending_event_head->next;
two = head; two = pending_event_head;
free( oldHead ); free_pending_event(oldHead);
} }
spin_unlock_irqrestore( &spinlock, flags ); spin_unlock_irqrestore(&pending_event_spinlock, flags);
/* send next waiting event */ /* send next waiting event */
if ( two != NULL ) if (two != NULL)
signalEvent( NULL ); signal_event(NULL);
} }
/* /*
...@@ -574,101 +513,94 @@ static void ackReceived( struct IoMFLpEvent * event ) ...@@ -574,101 +513,94 @@ static void ackReceived( struct IoMFLpEvent * event )
* parse it enough to know if it is an interrupt or an * parse it enough to know if it is an interrupt or an
* acknowledge. * acknowledge.
*/ */
static void hvHandler( struct HvLpEvent * event, struct pt_regs * regs ) static void hvHandler(struct HvLpEvent *event, struct pt_regs *regs)
{ {
if ( (event != NULL) && (event->xType == HvLpEvent_Type_MachineFac) ) if ((event != NULL) && (event->xType == HvLpEvent_Type_MachineFac)) {
{ switch(event->xFlags.xFunction) {
switch( event->xFlags.xFunction )
{
case HvLpEvent_Function_Ack: case HvLpEvent_Function_Ack:
ackReceived( (struct IoMFLpEvent *)event ); ackReceived((struct IoMFLpEvent *)event);
break; break;
case HvLpEvent_Function_Int: case HvLpEvent_Function_Int:
intReceived( (struct IoMFLpEvent *)event ); intReceived((struct IoMFLpEvent *)event);
break; break;
default: default:
printk( KERN_ERR "mf.c: non ack/int event received\n" ); printk(KERN_ERR "mf.c: non ack/int event received\n");
break; break;
} }
} } else
else printk(KERN_ERR "mf.c: alien event received\n");
printk( KERN_ERR "mf.c: alien event received\n" );
} }
/* /*
* Global kernel interface to allocate and seed events into the * Global kernel interface to allocate and seed events into the
* Hypervisor. * Hypervisor.
*/ */
void mf_allocateLpEvents( HvLpIndex targetLp, void mf_allocateLpEvents(HvLpIndex targetLp, HvLpEvent_Type type,
HvLpEvent_Type type, unsigned size, unsigned count, MFCompleteHandler hdlr,
unsigned size, void *userToken)
unsigned count,
MFCompleteHandler hdlr,
void * userToken )
{ {
struct StackElement * newElement = newStackElement(); struct pending_event *ev = new_pending_event();
int rc = 0; int rc;
if ( newElement == NULL ) if (ev == NULL) {
rc = -ENOMEM; rc = -ENOMEM;
else { } else {
union SafeCast mine; union safe_cast mine;
mine.ptr = userToken; mine.ptr = userToken;
newElement->event.xHvLpEvent.xSubtype = 4; ev->event.hp_lp_event.xSubtype = 4;
newElement->event.xHvLpEvent.xCorrelationToken = mine.ptrAsU64; ev->event.hp_lp_event.xCorrelationToken = mine.ptr_as_u64;
newElement->event.xHvLpEvent.x.xSubtypeData = ('M'<<24)+('F'<<16)+('M'<<8)+('A'<<0); ev->event.hp_lp_event.x.xSubtypeData =
newElement->event.xUnion.xAllocData.xTargetLp = targetLp; subtype_data('M', 'F', 'M', 'A');
newElement->event.xUnion.xAllocData.xType = type; ev->event.data.alloc.target_lp = targetLp;
newElement->event.xUnion.xAllocData.xSize = size; ev->event.data.alloc.type = type;
newElement->event.xUnion.xAllocData.xCount = count; ev->event.data.alloc.size = size;
newElement->hdlr = hdlr; ev->event.data.alloc.count = count;
rc = signalEvent(newElement); ev->hdlr = hdlr;
rc = signal_event(ev);
} }
if ((rc != 0) && (hdlr != NULL))
if ( (rc != 0) && (hdlr != NULL) ) (*hdlr)(userToken, rc);
(*hdlr)( userToken, rc );
} }
/* /*
* Global kernel interface to unseed and deallocate events already in * Global kernel interface to unseed and deallocate events already in
* Hypervisor. * Hypervisor.
*/ */
void mf_deallocateLpEvents( HvLpIndex targetLp, void mf_deallocateLpEvents(HvLpIndex targetLp, HvLpEvent_Type type,
HvLpEvent_Type type, unsigned count, MFCompleteHandler hdlr, void *userToken)
unsigned count,
MFCompleteHandler hdlr,
void * userToken )
{ {
struct StackElement * newElement = newStackElement(); struct pending_event *ev = new_pending_event();
int rc = 0; int rc;
if ( newElement == NULL ) if (ev == NULL)
rc = -ENOMEM; rc = -ENOMEM;
else { else {
union SafeCast mine; union safe_cast mine;
mine.ptr = userToken; mine.ptr = userToken;
newElement->event.xHvLpEvent.xSubtype = 5; ev->event.hp_lp_event.xSubtype = 5;
newElement->event.xHvLpEvent.xCorrelationToken = mine.ptrAsU64; ev->event.hp_lp_event.xCorrelationToken = mine.ptr_as_u64;
newElement->event.xHvLpEvent.x.xSubtypeData = ('M'<<24)+('F'<<16)+('M'<<8)+('D'<<0); ev->event.hp_lp_event.x.xSubtypeData =
newElement->event.xUnion.xAllocData.xTargetLp = targetLp; subtype_data('M', 'F', 'M', 'D');
newElement->event.xUnion.xAllocData.xType = type; ev->event.data.alloc.target_lp = targetLp;
newElement->event.xUnion.xAllocData.xCount = count; ev->event.data.alloc.type = type;
newElement->hdlr = hdlr; ev->event.data.alloc.count = count;
rc = signalEvent(newElement); ev->hdlr = hdlr;
rc = signal_event(ev);
} }
if ((rc != 0) && (hdlr != NULL))
if ( (rc != 0) && (hdlr != NULL) ) (*hdlr)(userToken, rc);
(*hdlr)( userToken, rc );
} }
/* /*
* Global kernel interface to tell the VSP object in the primary * Global kernel interface to tell the VSP object in the primary
* partition to power this partition off. * partition to power this partition off.
*/ */
void mf_powerOff( void ) void mf_powerOff(void)
{ {
printk( KERN_INFO "mf.c: Down it goes...\n" ); printk(KERN_INFO "mf.c: Down it goes...\n");
signalCEMsg( "\x00\x00\x00\x4D\x00\x00\x00\x00\x00\x00\x00\x00", NULL ); signal_ce_msg("\x00\x00\x00\x4D\x00\x00\x00\x00\x00\x00\x00\x00", NULL);
for (;;); for (;;);
} }
...@@ -676,111 +608,104 @@ void mf_powerOff( void ) ...@@ -676,111 +608,104 @@ void mf_powerOff( void )
* Global kernel interface to tell the VSP object in the primary * Global kernel interface to tell the VSP object in the primary
* partition to reboot this partition. * partition to reboot this partition.
*/ */
void mf_reboot( void ) void mf_reboot(void)
{ {
printk( KERN_INFO "mf.c: Preparing to bounce...\n" ); printk(KERN_INFO "mf.c: Preparing to bounce...\n");
signalCEMsg( "\x00\x00\x00\x4E\x00\x00\x00\x00\x00\x00\x00\x00", NULL ); signal_ce_msg("\x00\x00\x00\x4E\x00\x00\x00\x00\x00\x00\x00\x00", NULL);
for (;;); for (;;);
} }
/* /*
* Display a single word SRC onto the VSP control panel. * Display a single word SRC onto the VSP control panel.
*/ */
void mf_displaySrc( u32 word ) void mf_displaySrc(u32 word)
{ {
u8 ce[12]; u8 ce[12];
memcpy( ce, "\x00\x00\x00\x4A\x00\x00\x00\x01\x00\x00\x00\x00", 12 ); memcpy(ce, "\x00\x00\x00\x4A\x00\x00\x00\x01\x00\x00\x00\x00", 12);
ce[8] = word>>24; ce[8] = word >> 24;
ce[9] = word>>16; ce[9] = word >> 16;
ce[10] = word>>8; ce[10] = word >> 8;
ce[11] = word; ce[11] = word;
signalCEMsg( ce, NULL ); signal_ce_msg(ce, NULL);
} }
/* /*
* Display a single word SRC of the form "PROGXXXX" on the VSP control panel. * Display a single word SRC of the form "PROGXXXX" on the VSP control panel.
*/ */
void mf_displayProgress( u16 value ) void mf_displayProgress(u16 value)
{ {
u8 ce[12]; u8 ce[12];
u8 src[72]; u8 src[72];
memcpy( ce, "\x00\x00\x04\x4A\x00\x00\x00\x48\x00\x00\x00\x00", 12 ); memcpy(ce, "\x00\x00\x04\x4A\x00\x00\x00\x48\x00\x00\x00\x00", 12);
memcpy( src, memcpy(src, "\x01\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00"
"\x01\x00\x00\x01" "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
"\x00\x00\x00\x00" "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
"\x00\x00\x00\x00" "\x00\x00\x00\x00PROGxxxx ",
"\x00\x00\x00\x00" 72);
"\x00\x00\x00\x00" src[6] = value >> 8;
"\x00\x00\x00\x00" src[7] = value & 255;
"\x00\x00\x00\x00" src[44] = "0123456789ABCDEF"[(value >> 12) & 15];
"\x00\x00\x00\x00" src[45] = "0123456789ABCDEF"[(value >> 8) & 15];
"\x00\x00\x00\x00" src[46] = "0123456789ABCDEF"[(value >> 4) & 15];
"\x00\x00\x00\x00" src[47] = "0123456789ABCDEF"[value & 15];
"PROGxxxx" dma_and_signal_ce_msg(ce, NULL, src, sizeof(src), 9 * 64 * 1024);
" ",
72 );
src[6] = value>>8;
src[7] = value&255;
src[44] = "0123456789ABCDEF"[(value>>12)&15];
src[45] = "0123456789ABCDEF"[(value>>8)&15];
src[46] = "0123456789ABCDEF"[(value>>4)&15];
src[47] = "0123456789ABCDEF"[value&15];
dmaAndSignalCEMsg( ce, NULL, src, sizeof(src), 9*64*1024 );
} }
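mf_displayProgress() stores the 16-bit code raw in bytes 6-7 of the SRC buffer and also spells it out as four hex digits after "PROG"; a standalone sketch of the digit formatting with an arbitrary sample value:

#include <stdio.h>

int main(void)
{
        unsigned short value = 0x4A3F;          /* arbitrary progress code */
        char digits[5];

        digits[0] = "0123456789ABCDEF"[(value >> 12) & 15];
        digits[1] = "0123456789ABCDEF"[(value >> 8) & 15];
        digits[2] = "0123456789ABCDEF"[(value >> 4) & 15];
        digits[3] = "0123456789ABCDEF"[value & 15];
        digits[4] = '\0';
        printf("PROG%s\n", digits);             /* PROG4A3F */
        return 0;
}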
/* /*
* Clear the VSP control panel. Used to "erase" an SRC that was * Clear the VSP control panel. Used to "erase" an SRC that was
* previously displayed. * previously displayed.
*/ */
void mf_clearSrc( void ) void mf_clearSrc(void)
{ {
signalCEMsg( "\x00\x00\x00\x4B\x00\x00\x00\x00\x00\x00\x00\x00", NULL ); signal_ce_msg("\x00\x00\x00\x4B\x00\x00\x00\x00\x00\x00\x00\x00", NULL);
} }
/* /*
* Initialization code here. * Initialization code here.
*/ */
void mf_init( void ) void mf_init(void)
{ {
int i; int i;
/* initialize */ /* initialize */
spin_lock_init( &spinlock ); spin_lock_init(&pending_event_spinlock);
for ( i = 0; i < sizeof(prealloc)/sizeof(*prealloc); ++i ) for (i = 0;
free( &prealloc[i] ); i < sizeof(pending_event_prealloc) / sizeof(*pending_event_prealloc);
HvLpEvent_registerHandler( HvLpEvent_Type_MachineFac, &hvHandler ); ++i)
free_pending_event(&pending_event_prealloc[i]);
HvLpEvent_registerHandler(HvLpEvent_Type_MachineFac, &hvHandler);
/* virtual continue ack */ /* virtual continue ack */
signalCEMsg( "\x00\x00\x00\x57\x00\x00\x00\x00\x00\x00\x00\x00", NULL ); signal_ce_msg("\x00\x00\x00\x57\x00\x00\x00\x00\x00\x00\x00\x00", NULL);
/* initialization complete */ /* initialization complete */
printk( KERN_NOTICE "mf.c: iSeries Linux LPAR Machine Facilities initialized\n" ); printk(KERN_NOTICE "mf.c: iSeries Linux LPAR Machine Facilities initialized\n");
iSeries_proc_callback(&mf_proc_init); iSeries_proc_callback(&mf_proc_init);
} }
void mf_setSide(char side) void mf_setSide(char side)
{ {
int rc = 0; u64 newSide;
u64 newSide = 0;
struct VspCmdData myVspCmd; struct VspCmdData myVspCmd;
memset(&myVspCmd, 0, sizeof(myVspCmd)); memset(&myVspCmd, 0, sizeof(myVspCmd));
if (side == 'A') switch (side) {
newSide = 0; case 'A': newSide = 0;
else if (side == 'B') break;
newSide = 1; case 'B': newSide = 1;
else if (side == 'C') break;
newSide = 2; case 'C': newSide = 2;
else break;
newSide = 3; default: newSide = 3;
break;
myVspCmd.xSubData.xFunction02SelectIplTypeIn.xIplType = newSide; }
myVspCmd.xCmd = 10; myVspCmd.sub_data.ipl_type = newSide;
myVspCmd.cmd = 10;
rc = signalVspInstruction(&myVspCmd); (void)signal_vsp_instruction(&myVspCmd);
} }
char mf_getSide(void) char mf_getSide(void)
...@@ -790,91 +715,82 @@ char mf_getSide(void) ...@@ -790,91 +715,82 @@ char mf_getSide(void)
struct VspCmdData myVspCmd; struct VspCmdData myVspCmd;
memset(&myVspCmd, 0, sizeof(myVspCmd)); memset(&myVspCmd, 0, sizeof(myVspCmd));
myVspCmd.xCmd = 2; myVspCmd.cmd = 2;
myVspCmd.xSubData.xFunction02SelectIplTypeIn.xIplType = 0; myVspCmd.sub_data.ipl_type = 0;
mb(); mb();
rc = signalVspInstruction(&myVspCmd); rc = signal_vsp_instruction(&myVspCmd);
if (rc != 0) if (rc != 0)
{
return returnValue; return returnValue;
} else {
if (myVspCmd.xRc == 0) if (myVspCmd.result_code == 0) {
{ switch (myVspCmd.sub_data.ipl_type) {
if (myVspCmd.xSubData.xGetIplTypeOut.xIplType == 0) case 0: returnValue = 'A';
returnValue = 'A'; break;
else if (myVspCmd.xSubData.xGetIplTypeOut.xIplType == 1) case 1: returnValue = 'B';
returnValue = 'B'; break;
else if (myVspCmd.xSubData.xGetIplTypeOut.xIplType == 2) case 2: returnValue = 'C';
returnValue = 'C'; break;
else default: returnValue = 'D';
returnValue = 'D'; break;
} }
} }
return returnValue; return returnValue;
} }
void mf_getSrcHistory(char *buffer, int size) void mf_getSrcHistory(char *buffer, int size)
{ {
/* struct IplTypeReturnStuff returnStuff; #if 0
struct StackElement * newElement = newStackElement(); struct IplTypeReturnStuff returnStuff;
int rc = 0; struct pending_event *ev = new_pending_event();
char *pages[4]; int rc = 0;
char *pages[4];
pages[0] = kmalloc(4096, GFP_ATOMIC);
pages[1] = kmalloc(4096, GFP_ATOMIC); pages[0] = kmalloc(4096, GFP_ATOMIC);
pages[2] = kmalloc(4096, GFP_ATOMIC); pages[1] = kmalloc(4096, GFP_ATOMIC);
pages[3] = kmalloc(4096, GFP_ATOMIC); pages[2] = kmalloc(4096, GFP_ATOMIC);
if (( newElement == NULL ) || (pages[0] == NULL) || (pages[1] == NULL) || (pages[2] == NULL) || (pages[3] == NULL)) pages[3] = kmalloc(4096, GFP_ATOMIC);
rc = -ENOMEM; if ((ev == NULL) || (pages[0] == NULL) || (pages[1] == NULL)
else || (pages[2] == NULL) || (pages[3] == NULL))
{ return -ENOMEM;
returnStuff.xType = 0;
returnStuff.xRc = 0; returnStuff.xType = 0;
returnStuff.xDone = 0; returnStuff.xRc = 0;
newElement->event.xHvLpEvent.xSubtype = 6; returnStuff.xDone = 0;
newElement->event.xHvLpEvent.x.xSubtypeData = ('M'<<24)+('F'<<16)+('V'<<8)+('I'<<0); ev->event.hp_lp_event.xSubtype = 6;
newElement->event.xUnion.xVspCmd.xEvent = &returnStuff; ev->event.hp_lp_event.x.xSubtypeData =
newElement->event.xUnion.xVspCmd.xCmd = 4; subtype_data('M', 'F', 'V', 'I');
newElement->event.xUnion.xVspCmd.xLpIndex = HvLpConfig_getLpIndex(); ev->event.data.vsp_cmd.xEvent = &returnStuff;
newElement->event.xUnion.xVspCmd.xRc = 0xFF; ev->event.data.vsp_cmd.cmd = 4;
newElement->event.xUnion.xVspCmd.xReserved1 = 0; ev->event.data.vsp_cmd.lp_index = HvLpConfig_getLpIndex();
newElement->event.xUnion.xVspCmd.xSubData.xGetSrcHistoryIn.xPage[0] = (0x8000000000000000ULL | virt_to_absolute((unsigned long)pages[0])); ev->event.data.vsp_cmd.result_code = 0xFF;
newElement->event.xUnion.xVspCmd.xSubData.xGetSrcHistoryIn.xPage[1] = (0x8000000000000000ULL | virt_to_absolute((unsigned long)pages[1])); ev->event.data.vsp_cmd.reserved = 0;
newElement->event.xUnion.xVspCmd.xSubData.xGetSrcHistoryIn.xPage[2] = (0x8000000000000000ULL | virt_to_absolute((unsigned long)pages[2])); ev->event.data.vsp_cmd.sub_data.page[0] =
newElement->event.xUnion.xVspCmd.xSubData.xGetSrcHistoryIn.xPage[3] = (0x8000000000000000ULL | virt_to_absolute((unsigned long)pages[3])); (0x8000000000000000ULL | virt_to_absolute((unsigned long)pages[0]));
mb(); ev->event.data.vsp_cmd.sub_data.page[1] =
rc = signalEvent(newElement); (0x8000000000000000ULL | virt_to_absolute((unsigned long)pages[1]));
} ev->event.data.vsp_cmd.sub_data.page[2] =
(0x8000000000000000ULL | virt_to_absolute((unsigned long)pages[2]));
if (rc != 0) ev->event.data.vsp_cmd.sub_data.page[3] =
{ (0x8000000000000000ULL | virt_to_absolute((unsigned long)pages[3]));
return; mb();
} if (signal_event(ev) != 0)
else return;
{
while (returnStuff.xDone != 1) while (returnStuff.xDone != 1)
{ udelay(10);
udelay(10); if (returnStuff.xRc == 0)
} memcpy(buffer, pages[0], size);
kfree(pages[0]);
if (returnStuff.xRc == 0) kfree(pages[1]);
{ kfree(pages[2]);
memcpy(buffer, pages[0], size); kfree(pages[3]);
} #endif
}
kfree(pages[0]);
kfree(pages[1]);
kfree(pages[2]);
kfree(pages[3]);*/
} }
void mf_setCmdLine(const char *cmdline, int size, u64 side) void mf_setCmdLine(const char *cmdline, int size, u64 side)
{ {
struct VspCmdData myVspCmd; struct VspCmdData myVspCmd;
int rc = 0;
dma_addr_t dma_addr = 0; dma_addr_t dma_addr = 0;
char *page = pci_alloc_consistent(iSeries_vio_dev, size, &dma_addr); char *page = pci_alloc_consistent(iSeries_vio_dev, size, &dma_addr);
...@@ -886,13 +802,13 @@ void mf_setCmdLine(const char *cmdline, int size, u64 side) ...@@ -886,13 +802,13 @@ void mf_setCmdLine(const char *cmdline, int size, u64 side)
copy_from_user(page, cmdline, size); copy_from_user(page, cmdline, size);
memset(&myVspCmd, 0, sizeof(myVspCmd)); memset(&myVspCmd, 0, sizeof(myVspCmd));
myVspCmd.xCmd = 31; myVspCmd.cmd = 31;
myVspCmd.xSubData.xSetKernelCmdLineIn.xToken = dma_addr; myVspCmd.sub_data.kern.token = dma_addr;
myVspCmd.xSubData.xSetKernelCmdLineIn.xAddressType = HvLpDma_AddressType_TceIndex; myVspCmd.sub_data.kern.address_type = HvLpDma_AddressType_TceIndex;
myVspCmd.xSubData.xSetKernelCmdLineIn.xSide = side; myVspCmd.sub_data.kern.side = side;
myVspCmd.xSubData.xSetKernelCmdLineIn.xTransferLength = size; myVspCmd.sub_data.kern.length = size;
mb(); mb();
rc = signalVspInstruction(&myVspCmd); (void)signal_vsp_instruction(&myVspCmd);
pci_free_consistent(iSeries_vio_dev, size, page, dma_addr); pci_free_consistent(iSeries_vio_dev, size, page, dma_addr);
} }
...@@ -900,31 +816,29 @@ void mf_setCmdLine(const char *cmdline, int size, u64 side) ...@@ -900,31 +816,29 @@ void mf_setCmdLine(const char *cmdline, int size, u64 side)
int mf_getCmdLine(char *cmdline, int *size, u64 side) int mf_getCmdLine(char *cmdline, int *size, u64 side)
{ {
struct VspCmdData myVspCmd; struct VspCmdData myVspCmd;
int rc = 0; int rc;
int len = *size; int len = *size;
dma_addr_t dma_addr = pci_map_single(iSeries_vio_dev, cmdline, *size, PCI_DMA_FROMDEVICE); dma_addr_t dma_addr;
memset(cmdline, 0, *size); dma_addr = pci_map_single(iSeries_vio_dev, cmdline, len,
PCI_DMA_FROMDEVICE);
memset(cmdline, 0, len);
memset(&myVspCmd, 0, sizeof(myVspCmd)); memset(&myVspCmd, 0, sizeof(myVspCmd));
myVspCmd.xCmd = 33; myVspCmd.cmd = 33;
myVspCmd.xSubData.xGetKernelCmdLineIn.xToken = dma_addr; myVspCmd.sub_data.kern.token = dma_addr;
myVspCmd.xSubData.xGetKernelCmdLineIn.xAddressType = HvLpDma_AddressType_TceIndex; myVspCmd.sub_data.kern.address_type = HvLpDma_AddressType_TceIndex;
myVspCmd.xSubData.xGetKernelCmdLineIn.xSide = side; myVspCmd.sub_data.kern.side = side;
myVspCmd.xSubData.xGetKernelCmdLineIn.xTransferLength = *size; myVspCmd.sub_data.kern.length = len;
mb(); mb();
rc = signalVspInstruction(&myVspCmd); rc = signal_vsp_instruction(&myVspCmd);
if ( ! rc ) {
if (myVspCmd.xRc == 0) if (rc == 0) {
{ if (myVspCmd.result_code == 0)
len = myVspCmd.xSubData.xGetKernelCmdLineOut.xTransferLength; len = myVspCmd.sub_data.length_out;
} #if 0
/* else else
{
memcpy(cmdline, "Bad cmdline", 11); memcpy(cmdline, "Bad cmdline", 11);
} #endif
*/
} }
pci_unmap_single(iSeries_vio_dev, dma_addr, *size, PCI_DMA_FROMDEVICE); pci_unmap_single(iSeries_vio_dev, dma_addr, *size, PCI_DMA_FROMDEVICE);
...@@ -936,10 +850,8 @@ int mf_getCmdLine(char *cmdline, int *size, u64 side) ...@@ -936,10 +850,8 @@ int mf_getCmdLine(char *cmdline, int *size, u64 side)
int mf_setVmlinuxChunk(const char *buffer, int size, int offset, u64 side) int mf_setVmlinuxChunk(const char *buffer, int size, int offset, u64 side)
{ {
struct VspCmdData myVspCmd; struct VspCmdData myVspCmd;
int rc = 0; int rc;
dma_addr_t dma_addr = 0; dma_addr_t dma_addr = 0;
char *page = pci_alloc_consistent(iSeries_vio_dev, size, &dma_addr); char *page = pci_alloc_consistent(iSeries_vio_dev, size, &dma_addr);
if (page == NULL) { if (page == NULL) {
...@@ -950,23 +862,19 @@ int mf_setVmlinuxChunk(const char *buffer, int size, int offset, u64 side) ...@@ -950,23 +862,19 @@ int mf_setVmlinuxChunk(const char *buffer, int size, int offset, u64 side)
copy_from_user(page, buffer, size); copy_from_user(page, buffer, size);
memset(&myVspCmd, 0, sizeof(myVspCmd)); memset(&myVspCmd, 0, sizeof(myVspCmd));
myVspCmd.xCmd = 30; myVspCmd.cmd = 30;
myVspCmd.xSubData.xGetKernelImageIn.xToken = dma_addr; myVspCmd.sub_data.kern.token = dma_addr;
myVspCmd.xSubData.xGetKernelImageIn.xAddressType = HvLpDma_AddressType_TceIndex; myVspCmd.sub_data.kern.address_type = HvLpDma_AddressType_TceIndex;
myVspCmd.xSubData.xGetKernelImageIn.xSide = side; myVspCmd.sub_data.kern.side = side;
myVspCmd.xSubData.xGetKernelImageIn.xOffset = offset; myVspCmd.sub_data.kern.offset = offset;
myVspCmd.xSubData.xGetKernelImageIn.xTransferLength = size; myVspCmd.sub_data.kern.length = size;
mb(); mb();
rc = signalVspInstruction(&myVspCmd); rc = signal_vsp_instruction(&myVspCmd);
if (rc == 0) {
if (rc == 0) if (myVspCmd.result_code == 0)
{
if (myVspCmd.xRc == 0)
{
rc = 0; rc = 0;
} else { else
rc = -ENOMEM; rc = -ENOMEM;
}
} }
pci_free_consistent(iSeries_vio_dev, size, page, dma_addr); pci_free_consistent(iSeries_vio_dev, size, page, dma_addr);
...@@ -977,31 +885,27 @@ int mf_setVmlinuxChunk(const char *buffer, int size, int offset, u64 side) ...@@ -977,31 +885,27 @@ int mf_setVmlinuxChunk(const char *buffer, int size, int offset, u64 side)
int mf_getVmlinuxChunk(char *buffer, int *size, int offset, u64 side) int mf_getVmlinuxChunk(char *buffer, int *size, int offset, u64 side)
{ {
struct VspCmdData myVspCmd; struct VspCmdData myVspCmd;
int rc = 0; int rc;
int len = *size; int len = *size;
dma_addr_t dma_addr;
dma_addr_t dma_addr = pci_map_single(iSeries_vio_dev, buffer, *size, PCI_DMA_FROMDEVICE); dma_addr = pci_map_single(iSeries_vio_dev, buffer, len,
PCI_DMA_FROMDEVICE);
memset(buffer, 0, len); memset(buffer, 0, len);
memset(&myVspCmd, 0, sizeof(myVspCmd)); memset(&myVspCmd, 0, sizeof(myVspCmd));
myVspCmd.xCmd = 32; myVspCmd.cmd = 32;
myVspCmd.xSubData.xGetKernelImageIn.xToken = dma_addr; myVspCmd.sub_data.kern.token = dma_addr;
myVspCmd.xSubData.xGetKernelImageIn.xAddressType = HvLpDma_AddressType_TceIndex; myVspCmd.sub_data.kern.address_type = HvLpDma_AddressType_TceIndex;
myVspCmd.xSubData.xGetKernelImageIn.xSide = side; myVspCmd.sub_data.kern.side = side;
myVspCmd.xSubData.xGetKernelImageIn.xOffset = offset; myVspCmd.sub_data.kern.offset = offset;
myVspCmd.xSubData.xGetKernelImageIn.xTransferLength = len; myVspCmd.sub_data.kern.length = len;
mb(); mb();
rc = signalVspInstruction(&myVspCmd); rc = signal_vsp_instruction(&myVspCmd);
if (rc == 0) {
if (rc == 0) if (myVspCmd.result_code == 0)
{ *size = myVspCmd.sub_data.length_out;
if (myVspCmd.xRc == 0) else
{
*size = myVspCmd.xSubData.xGetKernelImageOut.xTransferLength;
} else {
rc = -ENOMEM; rc = -ENOMEM;
}
} }
pci_unmap_single(iSeries_vio_dev, dma_addr, len, PCI_DMA_FROMDEVICE); pci_unmap_single(iSeries_vio_dev, dma_addr, len, PCI_DMA_FROMDEVICE);
...@@ -1015,12 +919,11 @@ int mf_setRtcTime(unsigned long time) ...@@ -1015,12 +919,11 @@ int mf_setRtcTime(unsigned long time)
to_tm(time, &tm); to_tm(time, &tm);
return mf_setRtc( &tm ); return mf_setRtc(&tm);
} }
struct RtcTimeData struct RtcTimeData {
{ struct semaphore *sem;
struct semaphore *xSemaphore;
struct CeMsgData xCeMsg; struct CeMsgData xCeMsg;
int xRc; int xRc;
}; };
...@@ -1030,26 +933,23 @@ void getRtcTimeComplete(void * token, struct CeMsgData *ceMsg) ...@@ -1030,26 +933,23 @@ void getRtcTimeComplete(void * token, struct CeMsgData *ceMsg)
struct RtcTimeData *rtc = (struct RtcTimeData *)token; struct RtcTimeData *rtc = (struct RtcTimeData *)token;
memcpy(&(rtc->xCeMsg), ceMsg, sizeof(rtc->xCeMsg)); memcpy(&(rtc->xCeMsg), ceMsg, sizeof(rtc->xCeMsg));
rtc->xRc = 0; rtc->xRc = 0;
up(rtc->xSemaphore); up(rtc->sem);
} }
static unsigned long lastsec = 1; static unsigned long lastsec = 1;
int mf_getRtcTime(unsigned long *time) int mf_getRtcTime(unsigned long *time)
{ {
/* unsigned long usec, tsec; */
u32 dataWord1 = *((u32 *)(&xSpCommArea.xBcdTimeAtIplStart)); u32 dataWord1 = *((u32 *)(&xSpCommArea.xBcdTimeAtIplStart));
u32 dataWord2 = *(((u32 *)&(xSpCommArea.xBcdTimeAtIplStart)) + 1); u32 dataWord2 = *(((u32 *)&(xSpCommArea.xBcdTimeAtIplStart)) + 1);
int year = 1970; int year = 1970;
int year1 = ( dataWord1 >> 24 ) & 0x000000FF; int year1 = (dataWord1 >> 24) & 0x000000FF;
int year2 = ( dataWord1 >> 16 ) & 0x000000FF; int year2 = (dataWord1 >> 16) & 0x000000FF;
int sec = ( dataWord1 >> 8 ) & 0x000000FF; int sec = (dataWord1 >> 8) & 0x000000FF;
int min = dataWord1 & 0x000000FF; int min = dataWord1 & 0x000000FF;
int hour = ( dataWord2 >> 24 ) & 0x000000FF; int hour = (dataWord2 >> 24) & 0x000000FF;
int day = ( dataWord2 >> 8 ) & 0x000000FF; int day = (dataWord2 >> 8) & 0x000000FF;
int mon = dataWord2 & 0x000000FF; int mon = dataWord2 & 0x000000FF;
BCD_TO_BIN(sec); BCD_TO_BIN(sec);
...@@ -1062,49 +962,41 @@ int mf_getRtcTime(unsigned long *time) ...@@ -1062,49 +962,41 @@ int mf_getRtcTime(unsigned long *time)
year = year1 * 100 + year2; year = year1 * 100 + year2;
*time = mktime(year, mon, day, hour, min, sec); *time = mktime(year, mon, day, hour, min, sec);
*time += (jiffies / HZ);
*time += ( jiffies / HZ );
/* Now THIS is a nasty hack! /*
* Now THIS is a nasty hack!
* It ensures that the first two calls to mf_getRtcTime get different * It ensures that the first two calls to mf_getRtcTime get different
* answers. That way the loop in init_time (time.c) will not think * answers. That way the loop in init_time (time.c) will not think
* the clock is stuck. * the clock is stuck.
*/ */
if ( lastsec ) { if (lastsec) {
*time -= lastsec; *time -= lastsec;
--lastsec; --lastsec;
} }
return 0; return 0;
} }
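mf_getRtcTime() above unpacks two BCD-packed 32-bit words from xSpCommArea.xBcdTimeAtIplStart. A standalone sketch of that decoding with made-up sample words, assuming the same byte layout the code above uses:

#include <stdio.h>
#include <stdint.h>

/* Same conversion as the kernel's BCD_TO_BIN macro. */
#define BCD_TO_BIN(x) ((x) = ((x) & 0x0f) + ((x) >> 4) * 10)

int main(void)
{
	uint32_t word1 = 0x20240530;	/* century, year, sec, min (BCD) */
	uint32_t word2 = 0x14001506;	/* hour, pad, day, month (BCD)   */
	unsigned year1 = (word1 >> 24) & 0xff;
	unsigned year2 = (word1 >> 16) & 0xff;
	unsigned sec   = (word1 >> 8)  & 0xff;
	unsigned min   =  word1        & 0xff;
	unsigned hour  = (word2 >> 24) & 0xff;
	unsigned day   = (word2 >> 8)  & 0xff;
	unsigned mon   =  word2        & 0xff;

	BCD_TO_BIN(year1); BCD_TO_BIN(year2);
	BCD_TO_BIN(sec);   BCD_TO_BIN(min);
	BCD_TO_BIN(hour);  BCD_TO_BIN(day); BCD_TO_BIN(mon);

	printf("%04u-%02u-%02u %02u:%02u:%02u\n",
	       year1 * 100 + year2, mon, day, hour, min, sec);	/* 2024-06-15 14:30:05 */
	return 0;
}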
int mf_getRtc( struct rtc_time * tm ) int mf_getRtc(struct rtc_time *tm)
{ {
struct CeMsgCompleteData ceComplete; struct CeMsgCompleteData ceComplete;
struct RtcTimeData rtcData; struct RtcTimeData rtcData;
int rc = 0; int rc;
DECLARE_MUTEX_LOCKED(Semaphore); DECLARE_MUTEX_LOCKED(Semaphore);
memset(&ceComplete, 0, sizeof(ceComplete)); memset(&ceComplete, 0, sizeof(ceComplete));
memset(&rtcData, 0, sizeof(rtcData)); memset(&rtcData, 0, sizeof(rtcData));
rtcData.sem = &Semaphore;
rtcData.xSemaphore = &Semaphore; ceComplete.handler = &getRtcTimeComplete;
ceComplete.token = (void *)&rtcData;
ceComplete.xHdlr = &getRtcTimeComplete; rc = signal_ce_msg("\x00\x00\x00\x40\x00\x00\x00\x00\x00\x00\x00\x00",
ceComplete.xToken = (void *)&rtcData; &ceComplete);
if (rc == 0) {
rc = signalCEMsg( "\x00\x00\x00\x40\x00\x00\x00\x00\x00\x00\x00\x00", &ceComplete );
if ( rc == 0 )
{
down(&Semaphore); down(&Semaphore);
if ( rtcData.xRc == 0) if (rtcData.xRc == 0) {
{ if ((rtcData.xCeMsg.ce_msg[2] == 0xa9) ||
if ( ( rtcData.xCeMsg.xCEMsg[2] == 0xa9 ) || (rtcData.xCeMsg.ce_msg[2] == 0xaf)) {
( rtcData.xCeMsg.xCEMsg[2] == 0xaf ) ) {
/* TOD clock is not set */ /* TOD clock is not set */
tm->tm_sec = 1; tm->tm_sec = 1;
tm->tm_min = 1; tm->tm_min = 1;
...@@ -1112,16 +1004,16 @@ int mf_getRtc( struct rtc_time * tm ) ...@@ -1112,16 +1004,16 @@ int mf_getRtc( struct rtc_time * tm )
tm->tm_mday = 10; tm->tm_mday = 10;
tm->tm_mon = 8; tm->tm_mon = 8;
tm->tm_year = 71; tm->tm_year = 71;
mf_setRtc( tm ); mf_setRtc(tm);
} }
{ {
u32 dataWord1 = *((u32 *)(rtcData.xCeMsg.xCEMsg+4)); u32 dataWord1 = *((u32 *)(rtcData.xCeMsg.ce_msg+4));
u32 dataWord2 = *((u32 *)(rtcData.xCeMsg.xCEMsg+8)); u32 dataWord2 = *((u32 *)(rtcData.xCeMsg.ce_msg+8));
u8 year = (dataWord1 >> 16 ) & 0x000000FF; u8 year = (dataWord1 >> 16) & 0x000000FF;
u8 sec = ( dataWord1 >> 8 ) & 0x000000FF; u8 sec = (dataWord1 >> 8) & 0x000000FF;
u8 min = dataWord1 & 0x000000FF; u8 min = dataWord1 & 0x000000FF;
u8 hour = ( dataWord2 >> 24 ) & 0x000000FF; u8 hour = (dataWord2 >> 24) & 0x000000FF;
u8 day = ( dataWord2 >> 8 ) & 0x000000FF; u8 day = (dataWord2 >> 8) & 0x000000FF;
u8 mon = dataWord2 & 0x000000FF; u8 mon = dataWord2 & 0x000000FF;
BCD_TO_BIN(sec); BCD_TO_BIN(sec);
...@@ -1131,7 +1023,7 @@ int mf_getRtc( struct rtc_time * tm ) ...@@ -1131,7 +1023,7 @@ int mf_getRtc( struct rtc_time * tm )
BCD_TO_BIN(mon); BCD_TO_BIN(mon);
BCD_TO_BIN(year); BCD_TO_BIN(year);
if ( year <= 69 ) if (year <= 69)
year += 100; year += 100;
tm->tm_sec = sec; tm->tm_sec = sec;
...@@ -1154,17 +1046,14 @@ int mf_getRtc( struct rtc_time * tm ) ...@@ -1154,17 +1046,14 @@ int mf_getRtc( struct rtc_time * tm )
tm->tm_wday = 0; tm->tm_wday = 0;
tm->tm_yday = 0; tm->tm_yday = 0;
tm->tm_isdst = 0; tm->tm_isdst = 0;
} }
return rc; return rc;
} }
int mf_setRtc(struct rtc_time * tm) int mf_setRtc(struct rtc_time * tm)
{ {
char ceTime[12] = "\x00\x00\x00\x41\x00\x00\x00\x00\x00\x00\x00\x00"; char ceTime[12] = "\x00\x00\x00\x41\x00\x00\x00\x00\x00\x00\x00\x00";
int rc = 0;
u8 day, mon, hour, min, sec, y1, y2; u8 day, mon, hour, min, sec, y1, y2;
unsigned year; unsigned year;
...@@ -1194,10 +1083,5 @@ int mf_setRtc(struct rtc_time * tm) ...@@ -1194,10 +1083,5 @@ int mf_setRtc(struct rtc_time * tm)
ceTime[10] = day; ceTime[10] = day;
ceTime[11] = mon; ceTime[11] = mon;
rc = signalCEMsg( ceTime, NULL ); return signal_ce_msg(ceTime, NULL);
return rc;
} }
...@@ -66,32 +66,31 @@ _GLOBAL(get_sp) ...@@ -66,32 +66,31 @@ _GLOBAL(get_sp)
blr blr
#ifdef CONFIG_PPC_ISERIES #ifdef CONFIG_PPC_ISERIES
/* unsigned long __no_use_save_flags(void) */ /* unsigned long local_save_flags(void) */
_GLOBAL(__no_use_save_flags) _GLOBAL(local_get_flags)
#warning FIX ISERIES lbz r3,PACAPROCENABLED(r13)
mfspr r4,SPRG3
lbz r3,PACAPROCENABLED(r4)
blr blr
/* void __no_use_restore_flags(unsigned long flags) */ /* unsigned long local_irq_disable(void) */
_GLOBAL(__no_use_restore_flags) _GLOBAL(local_irq_disable)
/* lbz r3,PACAPROCENABLED(r13)
* Just set/clear the MSR_EE bit through restore/flags but do not li r4,0
* change anything else. This is needed by the RT system and makes stb r4,PACAPROCENABLED(r13)
* sense anyway. blr /* Done */
* -- Cort
*/ /* void local_irq_restore(unsigned long flags) */
#warning FIX ISERIES _GLOBAL(local_irq_restore)
mfspr r6,SPRG3 lbz r5,PACAPROCENABLED(r13)
lbz r5,PACAPROCENABLED(r6)
/* Check if things are setup the way we want _already_. */ /* Check if things are setup the way we want _already_. */
cmpw 0,r3,r5 cmpw 0,r3,r5
beqlr beqlr
/* are we enabling interrupts? */ /* are we enabling interrupts? */
cmpi 0,r3,0 cmpi 0,r3,0
stb r3,PACAPROCENABLED(r6) stb r3,PACAPROCENABLED(r13)
beqlr beqlr
/* Check pending interrupts */ /* Check pending interrupts */
/* A decrementer, IPI or PMC interrupt may have occurred
* while we were in the hypervisor (which enables) */
CHECKANYINT(r4,r5) CHECKANYINT(r4,r5)
beqlr beqlr
...@@ -101,35 +100,8 @@ _GLOBAL(__no_use_restore_flags) ...@@ -101,35 +100,8 @@ _GLOBAL(__no_use_restore_flags)
li r0,0x5555 li r0,0x5555
sc sc
blr blr
#endif /* CONFIG_PPC_ISERIES */
_GLOBAL(__no_use_cli)
#warning FIX ISERIES
mfspr r5,SPRG3
lbz r3,PACAPROCENABLED(r5)
li r4,0
stb r4,PACAPROCENABLED(r5)
blr /* Done */
_GLOBAL(__no_use_sti)
#warning FIX ISERIES
mfspr r6,SPRG3
li r3,1
stb r3,PACAPROCENABLED(r6)
/* Check for pending interrupts
* A decrementer, IPI or PMC interrupt may have occurred
* while we were in the hypervisor (which enables)
*/
CHECKANYINT(r4,r5)
beqlr
/*
* Handle pending interrupts in interrupt context
*/
li r0,0x5555
sc
blr
#endif
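The new local_get_flags/local_irq_disable/local_irq_restore routines above implement soft interrupt disabling: a per-processor enable byte in the paca is flipped, and any event that arrived while disabled is only serviced once interrupts are re-enabled. A C model of that protocol, with made-up names (soft_enabled, pending_irq and handle_pending are illustrative, not kernel symbols):

#include <stdio.h>

static int soft_enabled = 1;	/* models the PACAPROCENABLED byte         */
static int pending_irq;		/* models CHECKANYINT finding pending work */

static void handle_pending(void)
{
	printf("servicing interrupt that arrived while soft-disabled\n");
	pending_irq = 0;
}

static unsigned long my_irq_disable(void)
{
	unsigned long old = soft_enabled;

	soft_enabled = 0;		/* new events are only noted, not run */
	return old;
}

static void my_irq_restore(unsigned long flags)
{
	if (soft_enabled == (int)flags)
		return;			/* already in the requested state */
	soft_enabled = flags;
	if (flags && pending_irq)	/* re-enabling: run anything queued */
		handle_pending();
}

int main(void)
{
	unsigned long flags = my_irq_disable();

	pending_irq = 1;		/* an event arrives while disabled */
	my_irq_restore(flags);		/* handle_pending() runs here      */
	return 0;
}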
/* /*
* Flush instruction cache. * Flush instruction cache.
*/ */
...@@ -595,6 +567,10 @@ SYSCALL(dup) ...@@ -595,6 +567,10 @@ SYSCALL(dup)
SYSCALL(execve) SYSCALL(execve)
SYSCALL(waitpid) SYSCALL(waitpid)
#ifdef CONFIG_PPC_ISERIES /* hack hack hack */
#define ppc_rtas sys_ni_syscall
#endif
/* Why isn't this a) automatic, b) written in 'C'? */ /* Why isn't this a) automatic, b) written in 'C'? */
.balign 8 .balign 8
_GLOBAL(sys_call_table32) _GLOBAL(sys_call_table32)
......
...@@ -48,11 +48,13 @@ ...@@ -48,11 +48,13 @@
/* #define MONITOR_TCE 1 */ /* Turn on to sanity check TCE generation. */ /* #define MONITOR_TCE 1 */ /* Turn on to sanity check TCE generation. */
#ifdef CONFIG_PPC_PSERIES
/* Initialize so this guy does not end up in the BSS section. /* Initialize so this guy does not end up in the BSS section.
* Only used to pass OF initialization data set in prom.c into the main * Only used to pass OF initialization data set in prom.c into the main
* kernel code -- data ultimately copied into tceTables[]. * kernel code -- data ultimately copied into tceTables[].
*/ */
extern struct _of_tce_table of_tce_table[]; extern struct _of_tce_table of_tce_table[];
#endif
extern struct pci_controller* hose_head; extern struct pci_controller* hose_head;
extern struct pci_controller** hose_tail; extern struct pci_controller** hose_tail;
...@@ -98,7 +100,7 @@ void free_tce_range_nolock(struct TceTable *, ...@@ -98,7 +100,7 @@ void free_tce_range_nolock(struct TceTable *,
unsigned order ); unsigned order );
/* allocates a range of tces and sets them to the pages */ /* allocates a range of tces and sets them to the pages */
inline dma_addr_t get_tces( struct TceTable *, static inline dma_addr_t get_tces( struct TceTable *,
unsigned order, unsigned order,
void *page, void *page,
unsigned numPages, unsigned numPages,
...@@ -210,7 +212,7 @@ static void tce_build_pSeries(struct TceTable *tbl, long tcenum, ...@@ -210,7 +212,7 @@ static void tce_build_pSeries(struct TceTable *tbl, long tcenum,
* Build a TceTable structure. This contains a multi-level bit map which * Build a TceTable structure. This contains a multi-level bit map which
* is used to manage allocation of the tce space. * is used to manage allocation of the tce space.
*/ */
struct TceTable *build_tce_table( struct TceTable * tbl ) static struct TceTable *build_tce_table( struct TceTable * tbl )
{ {
unsigned long bits, bytes, totalBytes; unsigned long bits, bytes, totalBytes;
unsigned long numBits[NUM_TCE_LEVELS], numBytes[NUM_TCE_LEVELS]; unsigned long numBits[NUM_TCE_LEVELS], numBytes[NUM_TCE_LEVELS];
...@@ -518,7 +520,7 @@ static long test_tce_range( struct TceTable *tbl, long tcenum, unsigned order ) ...@@ -518,7 +520,7 @@ static long test_tce_range( struct TceTable *tbl, long tcenum, unsigned order )
return retval; return retval;
} }
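The TCE code in this file hands out naturally aligned power-of-two runs of TCE entries ("order"-sized allocations), tracked by the multi-level bit map mentioned above. A single-level sketch of the allocate/free arithmetic (illustrative only; the real TceTable uses NUM_TCE_LEVELS levels and locking):

#include <stdio.h>
#include <string.h>

#define NTCES 64
static unsigned char tce_used[NTCES];		/* 1 = TCE entry in use */

/* Find a free, naturally aligned run of 2^order TCEs and claim it. */
static long alloc_tce_range(unsigned order)
{
	unsigned long run = 1UL << order;
	unsigned long start, i;

	for (start = 0; start + run <= NTCES; start += run) {
		for (i = 0; i < run; i++)
			if (tce_used[start + i])
				break;
		if (i == run) {
			memset(&tce_used[start], 1, run);
			return (long)start;	/* first TCE number of the run */
		}
	}
	return -1;				/* no free aligned run */
}

static void free_tce_range(long tcenum, unsigned order)
{
	memset(&tce_used[tcenum], 0, 1UL << order);
}

int main(void)
{
	long a = alloc_tce_range(2);	/* 4 contiguous TCEs */
	long b = alloc_tce_range(0);	/* a single TCE      */

	printf("order-2 run at %ld, order-0 entry at %ld\n", a, b);
	free_tce_range(a, 2);
	free_tce_range(b, 0);
	return 0;
}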
inline dma_addr_t get_tces( struct TceTable *tbl, unsigned order, void *page, unsigned numPages, int direction ) static inline dma_addr_t get_tces( struct TceTable *tbl, unsigned order, void *page, unsigned numPages, int direction )
{ {
long tcenum; long tcenum;
unsigned long uaddr; unsigned long uaddr;
...@@ -581,7 +583,7 @@ static void tce_free_one_pSeries( struct TceTable *tbl, long tcenum ) ...@@ -581,7 +583,7 @@ static void tce_free_one_pSeries( struct TceTable *tbl, long tcenum )
} }
#endif #endif
void tce_free(struct TceTable *tbl, dma_addr_t dma_addr, static void tce_free(struct TceTable *tbl, dma_addr_t dma_addr,
unsigned order, unsigned num_pages) unsigned order, unsigned num_pages)
{ {
long tcenum, total_tces, free_tce; long tcenum, total_tces, free_tce;
...@@ -701,6 +703,7 @@ void create_tce_tables_for_buses(struct list_head *bus_list) ...@@ -701,6 +703,7 @@ void create_tce_tables_for_buses(struct list_head *bus_list)
} }
} }
#ifdef CONFIG_PPC_PSERIES
void create_tce_tables_for_busesLP(struct list_head *bus_list) void create_tce_tables_for_busesLP(struct list_head *bus_list)
{ {
struct list_head *ln; struct list_head *ln;
...@@ -722,15 +725,19 @@ void create_tce_tables_for_busesLP(struct list_head *bus_list) ...@@ -722,15 +725,19 @@ void create_tce_tables_for_busesLP(struct list_head *bus_list)
create_tce_tables_for_busesLP(&bus->children); create_tce_tables_for_busesLP(&bus->children);
} }
} }
#endif
void create_tce_tables(void) { void create_tce_tables(void) {
struct pci_dev *dev = NULL; struct pci_dev *dev = NULL;
struct device_node *dn, *mydn; struct device_node *dn, *mydn;
#ifdef CONFIG_PPC_PSERIES
if (systemcfg->platform == PLATFORM_PSERIES_LPAR) { if (systemcfg->platform == PLATFORM_PSERIES_LPAR) {
create_tce_tables_for_busesLP(&pci_root_buses); create_tce_tables_for_busesLP(&pci_root_buses);
} }
else { else
#endif
{
create_tce_tables_for_buses(&pci_root_buses); create_tce_tables_for_buses(&pci_root_buses);
} }
/* Now copy the tce_table ptr from the bus devices down to every /* Now copy the tce_table ptr from the bus devices down to every
...@@ -884,6 +891,7 @@ static void getTceTableParmsiSeries(struct iSeries_Device_Node* DevNode, ...@@ -884,6 +891,7 @@ static void getTceTableParmsiSeries(struct iSeries_Device_Node* DevNode,
static void getTceTableParmsPSeries(struct pci_controller *phb, static void getTceTableParmsPSeries(struct pci_controller *phb,
struct device_node *dn, struct device_node *dn,
struct TceTable *newTceTable ) { struct TceTable *newTceTable ) {
#ifdef CONFIG_PPC_PSERIES
phandle node; phandle node;
unsigned long i; unsigned long i;
...@@ -953,6 +961,7 @@ static void getTceTableParmsPSeries(struct pci_controller *phb, ...@@ -953,6 +961,7 @@ static void getTceTableParmsPSeries(struct pci_controller *phb,
} }
i++; i++;
} }
#endif
} }
/* /*
...@@ -970,6 +979,7 @@ static void getTceTableParmsPSeries(struct pci_controller *phb, ...@@ -970,6 +979,7 @@ static void getTceTableParmsPSeries(struct pci_controller *phb,
static void getTceTableParmsPSeriesLP(struct pci_controller *phb, static void getTceTableParmsPSeriesLP(struct pci_controller *phb,
struct device_node *dn, struct device_node *dn,
struct TceTable *newTceTable ) { struct TceTable *newTceTable ) {
#ifdef CONFIG_PPC_PSERIES
u32 *dma_window = (u32 *)get_property(dn, "ibm,dma-window", 0); u32 *dma_window = (u32 *)get_property(dn, "ibm,dma-window", 0);
if (!dma_window) { if (!dma_window) {
panic("PCI_DMA: getTceTableParmsPSeriesLP: device %s has no ibm,dma-window property!\n", dn->full_name); panic("PCI_DMA: getTceTableParmsPSeriesLP: device %s has no ibm,dma-window property!\n", dn->full_name);
...@@ -985,6 +995,7 @@ static void getTceTableParmsPSeriesLP(struct pci_controller *phb, ...@@ -985,6 +995,7 @@ static void getTceTableParmsPSeriesLP(struct pci_controller *phb,
PPCDBG(PPCDBG_TCEINIT, "\tnewTceTable->index = 0x%lx\n", newTceTable->index); PPCDBG(PPCDBG_TCEINIT, "\tnewTceTable->index = 0x%lx\n", newTceTable->index);
PPCDBG(PPCDBG_TCEINIT, "\tnewTceTable->startOffset = 0x%lx\n", newTceTable->startOffset); PPCDBG(PPCDBG_TCEINIT, "\tnewTceTable->startOffset = 0x%lx\n", newTceTable->startOffset);
PPCDBG(PPCDBG_TCEINIT, "\tnewTceTable->size = 0x%lx\n", newTceTable->size); PPCDBG(PPCDBG_TCEINIT, "\tnewTceTable->size = 0x%lx\n", newTceTable->size);
#endif
} }
/* Allocates a contiguous real buffer and creates TCEs over it. /* Allocates a contiguous real buffer and creates TCEs over it.
......
...@@ -46,6 +46,7 @@ ...@@ -46,6 +46,7 @@
static void * __init static void * __init
update_dn_pci_info(struct device_node *dn, void *data) update_dn_pci_info(struct device_node *dn, void *data)
{ {
#ifdef CONFIG_PPC_PSERIES
struct pci_controller *phb = (struct pci_controller *)data; struct pci_controller *phb = (struct pci_controller *)data;
u32 *regs; u32 *regs;
char *device_type = get_property(dn, "device_type", 0); char *device_type = get_property(dn, "device_type", 0);
...@@ -64,6 +65,7 @@ update_dn_pci_info(struct device_node *dn, void *data) ...@@ -64,6 +65,7 @@ update_dn_pci_info(struct device_node *dn, void *data)
dn->devfn = (regs[0] >> 8) & 0xff; dn->devfn = (regs[0] >> 8) & 0xff;
} }
} }
#endif
return NULL; return NULL;
} }
...@@ -97,6 +99,7 @@ void *traverse_pci_devices(struct device_node *start, traverse_func pre, travers ...@@ -97,6 +99,7 @@ void *traverse_pci_devices(struct device_node *start, traverse_func pre, travers
return ret; return ret;
for (dn = start->child; dn; dn = nextdn) { for (dn = start->child; dn; dn = nextdn) {
nextdn = NULL; nextdn = NULL;
#ifdef CONFIG_PPC_PSERIES
if (get_property(dn, "class-code", 0)) { if (get_property(dn, "class-code", 0)) {
if (pre && (ret = pre(dn, data)) != NULL) if (pre && (ret = pre(dn, data)) != NULL)
return ret; return ret;
...@@ -112,6 +115,7 @@ void *traverse_pci_devices(struct device_node *start, traverse_func pre, travers ...@@ -112,6 +115,7 @@ void *traverse_pci_devices(struct device_node *start, traverse_func pre, travers
post(dn, data); post(dn, data);
} }
} }
#endif
if (!nextdn) { if (!nextdn) {
/* Walk up to next valid sibling. */ /* Walk up to next valid sibling. */
do { do {
......
...@@ -170,15 +170,15 @@ EXPORT_SYMBOL(flush_icache_user_range); ...@@ -170,15 +170,15 @@ EXPORT_SYMBOL(flush_icache_user_range);
EXPORT_SYMBOL(flush_dcache_page); EXPORT_SYMBOL(flush_dcache_page);
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
#ifdef CONFIG_PPC_ISERIES #ifdef CONFIG_PPC_ISERIES
EXPORT_SYMBOL(__no_use_restore_flags); EXPORT_SYMBOL(local_get_flags);
EXPORT_SYMBOL(__no_use_save_flags); EXPORT_SYMBOL(local_irq_disable);
EXPORT_SYMBOL(__no_use_sti); EXPORT_SYMBOL(local_irq_restore);
EXPORT_SYMBOL(__no_use_cli);
#endif #endif
#endif #endif
EXPORT_SYMBOL(ppc_md); EXPORT_SYMBOL(ppc_md);
#ifdef CONFIG_PPC_PSERIES
EXPORT_SYMBOL(find_devices); EXPORT_SYMBOL(find_devices);
EXPORT_SYMBOL(find_type_devices); EXPORT_SYMBOL(find_type_devices);
EXPORT_SYMBOL(find_compatible_devices); EXPORT_SYMBOL(find_compatible_devices);
...@@ -187,6 +187,7 @@ EXPORT_SYMBOL(device_is_compatible); ...@@ -187,6 +187,7 @@ EXPORT_SYMBOL(device_is_compatible);
EXPORT_SYMBOL(machine_is_compatible); EXPORT_SYMBOL(machine_is_compatible);
EXPORT_SYMBOL(find_all_nodes); EXPORT_SYMBOL(find_all_nodes);
EXPORT_SYMBOL(get_property); EXPORT_SYMBOL(get_property);
#endif
EXPORT_SYMBOL_NOVERS(memcpy); EXPORT_SYMBOL_NOVERS(memcpy);
......
...@@ -80,8 +80,8 @@ int proc_pmc_set_pmc6( struct file *file, const char *buffer, unsigned long cou ...@@ -80,8 +80,8 @@ int proc_pmc_set_pmc6( struct file *file, const char *buffer, unsigned long cou
int proc_pmc_set_pmc7( struct file *file, const char *buffer, unsigned long count, void *data); int proc_pmc_set_pmc7( struct file *file, const char *buffer, unsigned long count, void *data);
int proc_pmc_set_pmc8( struct file *file, const char *buffer, unsigned long count, void *data); int proc_pmc_set_pmc8( struct file *file, const char *buffer, unsigned long count, void *data);
#if 0
void proc_ppc64_init(void) int proc_ppc64_init(void)
{ {
unsigned long i; unsigned long i;
struct proc_dir_entry *ent = NULL; struct proc_dir_entry *ent = NULL;
...@@ -184,6 +184,7 @@ void proc_ppc64_init(void) ...@@ -184,6 +184,7 @@ void proc_ppc64_init(void)
ent->write_proc = NULL; ent->write_proc = NULL;
} }
} }
#endif
/* /*
* Find the requested 'file' given a proc token. * Find the requested 'file' given a proc token.
......
...@@ -55,6 +55,7 @@ static struct file_operations page_map_fops = { ...@@ -55,6 +55,7 @@ static struct file_operations page_map_fops = {
.mmap = page_map_mmap .mmap = page_map_mmap
}; };
#ifdef CONFIG_PPC_PSERIES
/* routines for /proc/ppc64/ofdt */ /* routines for /proc/ppc64/ofdt */
static ssize_t ofdt_write(struct file *, const char __user *, size_t, loff_t *); static ssize_t ofdt_write(struct file *, const char __user *, size_t, loff_t *);
static void proc_ppc64_create_ofdt(struct proc_dir_entry *); static void proc_ppc64_create_ofdt(struct proc_dir_entry *);
...@@ -66,6 +67,7 @@ static char * parse_next_property(char *, char *, char **, int *, unsigned char* ...@@ -66,6 +67,7 @@ static char * parse_next_property(char *, char *, char **, int *, unsigned char*
static struct file_operations ofdt_fops = { static struct file_operations ofdt_fops = {
.write = ofdt_write .write = ofdt_write
}; };
#endif
int __init proc_ppc64_init(void) int __init proc_ppc64_init(void)
{ {
...@@ -108,6 +110,7 @@ int __init proc_ppc64_init(void) ...@@ -108,6 +110,7 @@ int __init proc_ppc64_init(void)
} }
} }
#ifdef CONFIG_PPC_PSERIES
/* Placeholder for rtas interfaces. */ /* Placeholder for rtas interfaces. */
if (proc_ppc64.rtas == NULL) if (proc_ppc64.rtas == NULL)
proc_ppc64.rtas = proc_mkdir("rtas", proc_ppc64.root); proc_ppc64.rtas = proc_mkdir("rtas", proc_ppc64.root);
...@@ -116,6 +119,7 @@ int __init proc_ppc64_init(void) ...@@ -116,6 +119,7 @@ int __init proc_ppc64_init(void)
proc_symlink("rtas", 0, "ppc64/rtas"); proc_symlink("rtas", 0, "ppc64/rtas");
proc_ppc64_create_ofdt(proc_ppc64.root); proc_ppc64_create_ofdt(proc_ppc64.root);
#endif
return 0; return 0;
} }
...@@ -197,6 +201,7 @@ static int page_map_mmap( struct file *file, struct vm_area_struct *vma ) ...@@ -197,6 +201,7 @@ static int page_map_mmap( struct file *file, struct vm_area_struct *vma )
return 0; return 0;
} }
#ifdef CONFIG_PPC_PSERIES
/* create /proc/ppc64/ofdt write-only by root */ /* create /proc/ppc64/ofdt write-only by root */
static void proc_ppc64_create_ofdt(struct proc_dir_entry *parent) static void proc_ppc64_create_ofdt(struct proc_dir_entry *parent)
{ {
...@@ -417,5 +422,6 @@ static void release_prop_list(const struct property *prop) ...@@ -417,5 +422,6 @@ static void release_prop_list(const struct property *prop)
} }
} }
#endif /* defined(CONFIG_PPC_PSERIES) */
fs_initcall(proc_ppc64_init); fs_initcall(proc_ppc64_init);
...@@ -188,6 +188,7 @@ void setup_system(unsigned long r3, unsigned long r4, unsigned long r5, ...@@ -188,6 +188,7 @@ void setup_system(unsigned long r3, unsigned long r4, unsigned long r5,
#endif #endif
} }
#ifdef CONFIG_PPC_PSERIES
if (systemcfg->platform & PLATFORM_PSERIES) { if (systemcfg->platform & PLATFORM_PSERIES) {
early_console_initialized = 1; early_console_initialized = 1;
register_console(&udbg_console); register_console(&udbg_console);
...@@ -209,6 +210,7 @@ void setup_system(unsigned long r3, unsigned long r4, unsigned long r5, ...@@ -209,6 +210,7 @@ void setup_system(unsigned long r3, unsigned long r4, unsigned long r5,
} }
#endif #endif
} }
#endif
printk("Starting Linux PPC64 %s\n", UTS_RELEASE); printk("Starting Linux PPC64 %s\n", UTS_RELEASE);
...@@ -228,7 +230,7 @@ void setup_system(unsigned long r3, unsigned long r4, unsigned long r5, ...@@ -228,7 +230,7 @@ void setup_system(unsigned long r3, unsigned long r4, unsigned long r5,
mm_init_ppc64(); mm_init_ppc64();
#ifdef CONFIG_SMP #if defined(CONFIG_SMP) && defined(CONFIG_PPC_PSERIES)
if (cur_cpu_spec->firmware_features & FW_FEATURE_SPLPAR) { if (cur_cpu_spec->firmware_features & FW_FEATURE_SPLPAR) {
vpa_init(boot_cpuid); vpa_init(boot_cpuid);
} }
...@@ -310,6 +312,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) ...@@ -310,6 +312,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
seq_printf(m, "\n"); seq_printf(m, "\n");
#ifdef CONFIG_PPC_PSERIES
/* /*
* Assume here that all clock rates are the same in a * Assume here that all clock rates are the same in a
* smp system. -- Cort * smp system. -- Cort
...@@ -328,6 +331,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) ...@@ -328,6 +331,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
of_node_put(cpu_node); of_node_put(cpu_node);
} }
} }
#endif
if (ppc_md.setup_residual != NULL) if (ppc_md.setup_residual != NULL)
ppc_md.setup_residual(m, cpu_id); ppc_md.setup_residual(m, cpu_id);
...@@ -362,9 +366,6 @@ struct seq_operations cpuinfo_op = { ...@@ -362,9 +366,6 @@ struct seq_operations cpuinfo_op = {
void parse_cmd_line(unsigned long r3, unsigned long r4, unsigned long r5, void parse_cmd_line(unsigned long r3, unsigned long r4, unsigned long r5,
unsigned long r6, unsigned long r7) unsigned long r6, unsigned long r7)
{ {
struct device_node *chosen;
char *p;
#ifdef CONFIG_BLK_DEV_INITRD #ifdef CONFIG_BLK_DEV_INITRD
if ((initrd_start == 0) && r3 && r4 && r4 != 0xdeadbeef) { if ((initrd_start == 0) && r3 && r4 && r4 != 0xdeadbeef) {
initrd_start = (r3 >= KERNELBASE) ? r3 : (unsigned long)__va(r3); initrd_start = (r3 >= KERNELBASE) ? r3 : (unsigned long)__va(r3);
...@@ -380,13 +381,20 @@ void parse_cmd_line(unsigned long r3, unsigned long r4, unsigned long r5, ...@@ -380,13 +381,20 @@ void parse_cmd_line(unsigned long r3, unsigned long r4, unsigned long r5,
strlcpy(cmd_line, CONFIG_CMDLINE, sizeof(cmd_line)); strlcpy(cmd_line, CONFIG_CMDLINE, sizeof(cmd_line));
#endif /* CONFIG_CMDLINE */ #endif /* CONFIG_CMDLINE */
#ifdef CONFIG_PPC_PSERIES
{
struct device_node *chosen;
chosen = of_find_node_by_name(NULL, "chosen"); chosen = of_find_node_by_name(NULL, "chosen");
if (chosen != NULL) { if (chosen != NULL) {
char *p;
p = get_property(chosen, "bootargs", NULL); p = get_property(chosen, "bootargs", NULL);
if (p != NULL && p[0] != 0) if (p != NULL && p[0] != 0)
strlcpy(cmd_line, p, sizeof(cmd_line)); strlcpy(cmd_line, p, sizeof(cmd_line));
of_node_put(chosen); of_node_put(chosen);
} }
}
#endif
/* Look for mem= option on command line */ /* Look for mem= option on command line */
if (strstr(cmd_line, "mem=")) { if (strstr(cmd_line, "mem=")) {
...@@ -412,28 +420,7 @@ void parse_cmd_line(unsigned long r3, unsigned long r4, unsigned long r5, ...@@ -412,28 +420,7 @@ void parse_cmd_line(unsigned long r3, unsigned long r4, unsigned long r5,
} }
char *bi_tag2str(unsigned long tag) #ifdef CONFIG_PPC_PSERIES
{
switch (tag) {
case BI_FIRST:
return "BI_FIRST";
case BI_LAST:
return "BI_LAST";
case BI_CMD_LINE:
return "BI_CMD_LINE";
case BI_BOOTLOADER_ID:
return "BI_BOOTLOADER_ID";
case BI_INITRD:
return "BI_INITRD";
case BI_SYSMAP:
return "BI_SYSMAP";
case BI_MACHTYPE:
return "BI_MACHTYPE";
default:
return "BI_UNKNOWN";
}
}
int parse_bootinfo(void) int parse_bootinfo(void)
{ {
struct bi_record *rec; struct bi_record *rec;
...@@ -467,6 +454,7 @@ int parse_bootinfo(void) ...@@ -467,6 +454,7 @@ int parse_bootinfo(void)
return 0; return 0;
} }
#endif
int __init ppc_init(void) int __init ppc_init(void)
{ {
......
...@@ -141,7 +141,7 @@ static int smp_iSeries_probe(void) ...@@ -141,7 +141,7 @@ static int smp_iSeries_probe(void)
for (i=0; i < NR_CPUS; ++i) { for (i=0; i < NR_CPUS; ++i) {
lpPaca = paca[i].xLpPacaPtr; lpPaca = paca[i].xLpPacaPtr;
if (lpPaca->xDynProcStatus < 2) { if (lpPaca->xDynProcStatus < 2) {
paca[i].active = 1; /*paca[i].active = 1;*/
++np; ++np;
} }
} }
...@@ -187,7 +187,6 @@ void __init smp_init_iSeries(void) ...@@ -187,7 +187,6 @@ void __init smp_init_iSeries(void)
smp_ops->probe = smp_iSeries_probe; smp_ops->probe = smp_iSeries_probe;
smp_ops->kick_cpu = smp_iSeries_kick_cpu; smp_ops->kick_cpu = smp_iSeries_kick_cpu;
smp_ops->setup_cpu = smp_iSeries_setup_cpu; smp_ops->setup_cpu = smp_iSeries_setup_cpu;
#warning fix for iseries
systemcfg->processorCount = smp_iSeries_numProcs(); systemcfg->processorCount = smp_iSeries_numProcs();
} }
#endif #endif
...@@ -689,9 +688,11 @@ int __devinit start_secondary(void *unused) ...@@ -689,9 +688,11 @@ int __devinit start_secondary(void *unused)
get_paca()->yielded = 0; get_paca()->yielded = 0;
#ifdef CONFIG_PPC_PSERIES
if (cur_cpu_spec->firmware_features & FW_FEATURE_SPLPAR) { if (cur_cpu_spec->firmware_features & FW_FEATURE_SPLPAR) {
vpa_init(cpu); vpa_init(cpu);
} }
#endif
local_irq_enable(); local_irq_enable();
......
...@@ -16,6 +16,7 @@ ...@@ -16,6 +16,7 @@
* This file handles the architecture-dependent parts of hardware exceptions * This file handles the architecture-dependent parts of hardware exceptions
*/ */
#include <linux/config.h>
#include <linux/errno.h> #include <linux/errno.h>
#include <linux/sched.h> #include <linux/sched.h>
#include <linux/kernel.h> #include <linux/kernel.h>
...@@ -26,7 +27,6 @@ ...@@ -26,7 +27,6 @@
#include <linux/user.h> #include <linux/user.h>
#include <linux/a.out.h> #include <linux/a.out.h>
#include <linux/interrupt.h> #include <linux/interrupt.h>
#include <linux/config.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/module.h> #include <linux/module.h>
...@@ -40,8 +40,10 @@ ...@@ -40,8 +40,10 @@
extern int fix_alignment(struct pt_regs *); extern int fix_alignment(struct pt_regs *);
extern void bad_page_fault(struct pt_regs *, unsigned long, int); extern void bad_page_fault(struct pt_regs *, unsigned long, int);
#ifdef CONFIG_PPC_PSERIES
/* This is true if we are using the firmware NMI handler (typically LPAR) */ /* This is true if we are using the firmware NMI handler (typically LPAR) */
extern int fwnmi_active; extern int fwnmi_active;
#endif
#ifdef CONFIG_DEBUG_KERNEL #ifdef CONFIG_DEBUG_KERNEL
void (*debugger)(struct pt_regs *regs); void (*debugger)(struct pt_regs *regs);
...@@ -96,6 +98,7 @@ _exception(int signr, siginfo_t *info, struct pt_regs *regs) ...@@ -96,6 +98,7 @@ _exception(int signr, siginfo_t *info, struct pt_regs *regs)
force_sig_info(signr, info, current); force_sig_info(signr, info, current);
} }
#ifdef CONFIG_PPC_PSERIES
/* Get the error information for errors coming through the /* Get the error information for errors coming through the
* FWNMI vectors. The pt_regs' r3 will be updated to reflect * FWNMI vectors. The pt_regs' r3 will be updated to reflect
* the actual r3 if possible, and a ptr to the error log entry * the actual r3 if possible, and a ptr to the error log entry
...@@ -128,10 +131,12 @@ static void FWNMI_release_errinfo(void) ...@@ -128,10 +131,12 @@ static void FWNMI_release_errinfo(void)
if (ret != 0) if (ret != 0)
printk("FWNMI: nmi-interlock failed: %ld\n", ret); printk("FWNMI: nmi-interlock failed: %ld\n", ret);
} }
#endif
void void
SystemResetException(struct pt_regs *regs) SystemResetException(struct pt_regs *regs)
{ {
#ifdef CONFIG_PPC_PSERIES
if (fwnmi_active) { if (fwnmi_active) {
struct rtas_error_log *errhdr = FWNMI_get_errinfo(regs); struct rtas_error_log *errhdr = FWNMI_get_errinfo(regs);
if (errhdr) { if (errhdr) {
...@@ -139,6 +144,7 @@ SystemResetException(struct pt_regs *regs) ...@@ -139,6 +144,7 @@ SystemResetException(struct pt_regs *regs)
} }
FWNMI_release_errinfo(); FWNMI_release_errinfo();
} }
#endif
#ifdef CONFIG_DEBUG_KERNEL #ifdef CONFIG_DEBUG_KERNEL
if (debugger) if (debugger)
...@@ -154,6 +160,7 @@ SystemResetException(struct pt_regs *regs) ...@@ -154,6 +160,7 @@ SystemResetException(struct pt_regs *regs)
/* What should we do here? We could issue a shutdown or hard reset. */ /* What should we do here? We could issue a shutdown or hard reset. */
} }
#ifdef CONFIG_PPC_PSERIES
/* /*
* See if we can recover from a machine check exception. * See if we can recover from a machine check exception.
* This is only called on power4 (or above) and only via * This is only called on power4 (or above) and only via
...@@ -190,6 +197,7 @@ static int recover_mce(struct pt_regs *regs, struct rtas_error_log err) ...@@ -190,6 +197,7 @@ static int recover_mce(struct pt_regs *regs, struct rtas_error_log err)
} }
return 0; return 0;
} }
#endif
/* /*
* Handle a machine check. * Handle a machine check.
...@@ -207,6 +215,7 @@ static int recover_mce(struct pt_regs *regs, struct rtas_error_log err) ...@@ -207,6 +215,7 @@ static int recover_mce(struct pt_regs *regs, struct rtas_error_log err)
void void
MachineCheckException(struct pt_regs *regs) MachineCheckException(struct pt_regs *regs)
{ {
#ifdef CONFIG_PPC_PSERIES
struct rtas_error_log err, *errp; struct rtas_error_log err, *errp;
if (fwnmi_active) { if (fwnmi_active) {
...@@ -217,6 +226,7 @@ MachineCheckException(struct pt_regs *regs) ...@@ -217,6 +226,7 @@ MachineCheckException(struct pt_regs *regs)
if (errp && recover_mce(regs, err)) if (errp && recover_mce(regs, err))
return; return;
} }
#endif
#ifdef CONFIG_DEBUG_KERNEL #ifdef CONFIG_DEBUG_KERNEL
if (debugger_fault_handler) { if (debugger_fault_handler) {
......
/* -*- linux-c -*-
* arch/ppc64/kernel/viopath.c
*
* iSeries Virtual I/O Message Path code
*
* Authors: Dave Boutcher <boutcher@us.ibm.com>
* Ryan Arnold <ryanarn@us.ibm.com>
* Colin Devilbiss <devilbis@us.ibm.com>
*
* (C) Copyright 2000-2003 IBM Corporation
*
* This code is used by the iSeries virtual disk, cd,
* tape, and console to communicate with OS/400 in another
* partition.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/config.h>
#include <asm/uaccess.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/proc_fs.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <asm/hardirq.h>	/* for in_atomic */
#include <asm/iSeries/LparData.h>
#include <asm/iSeries/HvLpEvent.h>
#include <asm/iSeries/HvLpConfig.h>
#include <asm/iSeries/HvCallCfg.h>
#include <asm/iSeries/mf.h>
#include <asm/iSeries/iSeries_proc.h>
#include <asm/iSeries/vio.h>
extern struct pci_dev *iSeries_vio_dev;
/* Status of the path to each other partition in the system.
* This is overkill, since we will only ever establish connections
* to our hosting partition and the primary partition on the system.
* But this allows for other support in the future.
*/
static struct viopathStatus {
int isOpen:1; /* Did we open the path? */
int isActive:1; /* Do we have a mon msg outstanding */
int users[VIO_MAX_SUBTYPES];
HvLpInstanceId mSourceInst;
HvLpInstanceId mTargetInst;
int numberAllocated;
} viopathStatus[HVMAXARCHITECTEDLPS];
static spinlock_t statuslock = SPIN_LOCK_UNLOCKED;
/*
* For each kind of event we allocate a buffer that is
* guaranteed not to cross a page boundary
*/
static unsigned char event_buffer[VIO_MAX_SUBTYPES * 256] __page_aligned;
static atomic_t event_buffer_available[VIO_MAX_SUBTYPES];
static int event_buffer_initialised;
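The 256-byte per-subtype slots carved out of the page-aligned event_buffer can never straddle a 4 KB page, because 256 divides 4096 evenly. A quick standalone check of that claim (the slot count below is illustrative, not the real VIO_MAX_SUBTYPES):

#include <stdio.h>

int main(void)
{
	const unsigned long page_size = 4096, slot_size = 256, nslots = 16;
	unsigned long base = 0x10000;	/* any page-aligned base address */
	unsigned long i;

	for (i = 0; i < nslots; i++) {
		unsigned long start = base + i * slot_size;
		unsigned long end = start + slot_size - 1;

		if (start / page_size != end / page_size) {
			printf("slot %lu crosses a page boundary!\n", i);
			return 1;
		}
	}
	printf("no slot crosses a page boundary\n");
	return 0;
}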
static void handleMonitorEvent(struct HvLpEvent *event);
/*
* We use this structure to handle asynchronous responses. The caller
* blocks on the semaphore and the handler posts the semaphore. However,
* if in_atomic() is true in the caller, then wait_atomic is used ...
*/
struct doneAllocParms_t {
struct semaphore *sem;
int number;
volatile unsigned long *wait_atomic;
int used_wait_atomic;
};
/* Put a sequence number in each mon msg. The value is not
* important. Start at something other than 0 just for
 * readability. Wrapping this is ok.
*/
static u8 viomonseq = 22;
/* Our hosting logical partition. We get this at startup
* time, and different modules access this variable directly.
*/
HvLpIndex viopath_hostLp = 0xff; /* HvLpIndexInvalid */
EXPORT_SYMBOL(viopath_hostLp);
HvLpIndex viopath_ourLp = 0xff;
EXPORT_SYMBOL(viopath_ourLp);
/* For each kind of incoming event we set a pointer to a
* routine to call.
*/
static vio_event_handler_t *vio_handler[VIO_MAX_SUBTYPES];
static unsigned char e2a(unsigned char x)
{
switch (x) {
case 0xF0:
return '0';
case 0xF1:
return '1';
case 0xF2:
return '2';
case 0xF3:
return '3';
case 0xF4:
return '4';
case 0xF5:
return '5';
case 0xF6:
return '6';
case 0xF7:
return '7';
case 0xF8:
return '8';
case 0xF9:
return '9';
case 0xC1:
return 'A';
case 0xC2:
return 'B';
case 0xC3:
return 'C';
case 0xC4:
return 'D';
case 0xC5:
return 'E';
case 0xC6:
return 'F';
case 0xC7:
return 'G';
case 0xC8:
return 'H';
case 0xC9:
return 'I';
case 0xD1:
return 'J';
case 0xD2:
return 'K';
case 0xD3:
return 'L';
case 0xD4:
return 'M';
case 0xD5:
return 'N';
case 0xD6:
return 'O';
case 0xD7:
return 'P';
case 0xD8:
return 'Q';
case 0xD9:
return 'R';
case 0xE2:
return 'S';
case 0xE3:
return 'T';
case 0xE4:
return 'U';
case 0xE5:
return 'V';
case 0xE6:
return 'W';
case 0xE7:
return 'X';
case 0xE8:
return 'Y';
case 0xE9:
return 'Z';
}
return ' ';
}
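e2a() above maps the EBCDIC digits and uppercase letters found in the VPD area to ASCII. The same mapping expressed with range checks, shown here as an illustrative alternative to the switch (not part of the patch):

#include <stdio.h>

static char ebcdic_to_ascii(unsigned char x)
{
	static const char digits[] = "0123456789";	/* 0xF0..0xF9 */
	static const char let_ai[] = "ABCDEFGHI";	/* 0xC1..0xC9 */
	static const char let_jr[] = "JKLMNOPQR";	/* 0xD1..0xD9 */
	static const char let_sz[] = "STUVWXYZ";	/* 0xE2..0xE9 */

	if (x >= 0xF0 && x <= 0xF9)
		return digits[x - 0xF0];
	if (x >= 0xC1 && x <= 0xC9)
		return let_ai[x - 0xC1];
	if (x >= 0xD1 && x <= 0xD9)
		return let_jr[x - 0xD1];
	if (x >= 0xE2 && x <= 0xE9)
		return let_sz[x - 0xE2];
	return ' ';			/* same fallback as the switch default */
}

int main(void)
{
	unsigned char sample[] = { 0xC8, 0xE5, 0xF4, 0xF2 };
	unsigned i;

	for (i = 0; i < sizeof(sample); i++)
		putchar(ebcdic_to_ascii(sample[i]));	/* prints HV42 */
	putchar('\n');
	return 0;
}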
/* Handle reads from the proc file system
*/
static int proc_read(char *buf, char **start, off_t offset,
int blen, int *eof, void *data)
{
HvLpEvent_Rc hvrc;
DECLARE_MUTEX_LOCKED(Semaphore);
dma_addr_t dmaa =
pci_map_single(iSeries_vio_dev, buf, PAGE_SIZE,
PCI_DMA_FROMDEVICE);
int len = PAGE_SIZE;
if (len > blen)
len = blen;
memset(buf, 0x00, len);
hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
HvLpEvent_Type_VirtualIo,
viomajorsubtype_config | vioconfigget,
HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck,
viopath_sourceinst(viopath_hostLp),
viopath_targetinst(viopath_hostLp),
(u64)(unsigned long)&Semaphore, VIOVERSION << 16,
((u64)dmaa) << 32, len, 0, 0);
if (hvrc != HvLpEvent_Rc_Good)
printk("viopath hv error on op %d\n", (int) hvrc);
down(&Semaphore);
pci_unmap_single(iSeries_vio_dev, dmaa, PAGE_SIZE,
PCI_DMA_FROMDEVICE);
sprintf(buf + strlen(buf), "SRLNBR=");
buf[strlen(buf)] = e2a(xItExtVpdPanel.mfgID[2]);
buf[strlen(buf)] = e2a(xItExtVpdPanel.mfgID[3]);
buf[strlen(buf)] = e2a(xItExtVpdPanel.systemSerial[1]);
buf[strlen(buf)] = e2a(xItExtVpdPanel.systemSerial[2]);
buf[strlen(buf)] = e2a(xItExtVpdPanel.systemSerial[3]);
buf[strlen(buf)] = e2a(xItExtVpdPanel.systemSerial[4]);
buf[strlen(buf)] = e2a(xItExtVpdPanel.systemSerial[5]);
buf[strlen(buf)] = '\n';
*eof = 1;
return strlen(buf);
}
/* Handle writes to our proc file system
*/
static int proc_write(struct file *file, const char *buffer,
unsigned long count, void *data)
{
/* Doesn't do anything today!!!
*/
return count;
}
/* setup our proc file system entries
*/
static void vio_proc_init(struct proc_dir_entry *iSeries_proc)
{
struct proc_dir_entry *ent;
ent = create_proc_entry("config", S_IFREG | S_IRUSR, iSeries_proc);
if (!ent)
return;
ent->nlink = 1;
ent->data = NULL;
ent->read_proc = proc_read;
ent->write_proc = proc_write;
}
/* See if a given LP is active. Allow for invalid lps to be passed in
* and just return invalid
*/
int viopath_isactive(HvLpIndex lp)
{
if (lp == HvLpIndexInvalid)
return 0;
if (lp < HVMAXARCHITECTEDLPS)
return viopathStatus[lp].isActive;
else
return 0;
}
EXPORT_SYMBOL(viopath_isactive);
/*
* We cache the source and target instance ids for each
* partition.
*/
HvLpInstanceId viopath_sourceinst(HvLpIndex lp)
{
return viopathStatus[lp].mSourceInst;
}
EXPORT_SYMBOL(viopath_sourceinst);
HvLpInstanceId viopath_targetinst(HvLpIndex lp)
{
return viopathStatus[lp].mTargetInst;
}
EXPORT_SYMBOL(viopath_targetinst);
/*
* Send a monitor message. This is a message with the acknowledge
* bit on that the other side will NOT explicitly acknowledge. When
* the other side goes down, the hypervisor will acknowledge any
* outstanding messages....so we will know when the other side dies.
*/
static void sendMonMsg(HvLpIndex remoteLp)
{
HvLpEvent_Rc hvrc;
viopathStatus[remoteLp].mSourceInst =
HvCallEvent_getSourceLpInstanceId(remoteLp,
HvLpEvent_Type_VirtualIo);
viopathStatus[remoteLp].mTargetInst =
HvCallEvent_getTargetLpInstanceId(remoteLp,
HvLpEvent_Type_VirtualIo);
/*
 * Deliberately ignore the return code here. If we call this
* more than once, we don't care.
*/
vio_setHandler(viomajorsubtype_monitor, handleMonitorEvent);
hvrc = HvCallEvent_signalLpEventFast(remoteLp, HvLpEvent_Type_VirtualIo,
viomajorsubtype_monitor, HvLpEvent_AckInd_DoAck,
HvLpEvent_AckType_DeferredAck,
viopathStatus[remoteLp].mSourceInst,
viopathStatus[remoteLp].mTargetInst,
viomonseq++, 0, 0, 0, 0, 0);
if (hvrc == HvLpEvent_Rc_Good)
viopathStatus[remoteLp].isActive = 1;
else {
printk(KERN_WARNING_VIO "could not connect to partition %d\n",
remoteLp);
viopathStatus[remoteLp].isActive = 0;
}
}
static void handleMonitorEvent(struct HvLpEvent *event)
{
HvLpIndex remoteLp;
int i;
/*
* This handler is _also_ called as part of the loop
* at the end of this routine, so it must be able to
* ignore NULL events...
*/
if (!event)
return;
/*
* First see if this is just a normal monitor message from the
* other partition
*/
if (event->xFlags.xFunction == HvLpEvent_Function_Int) {
remoteLp = event->xSourceLp;
if (!viopathStatus[remoteLp].isActive)
sendMonMsg(remoteLp);
return;
}
/*
* This path is for an acknowledgement; the other partition
* died
*/
remoteLp = event->xTargetLp;
if ((event->xSourceInstanceId != viopathStatus[remoteLp].mSourceInst) ||
(event->xTargetInstanceId != viopathStatus[remoteLp].mTargetInst)) {
printk(KERN_WARNING_VIO "ignoring ack....mismatched instances\n");
return;
}
printk(KERN_WARNING_VIO "partition %d ended\n", remoteLp);
viopathStatus[remoteLp].isActive = 0;
/*
* For each active handler, pass them a NULL
* message to indicate that the other partition
* died
*/
for (i = 0; i < VIO_MAX_SUBTYPES; i++) {
if (vio_handler[i] != NULL)
(*vio_handler[i])(NULL);
}
}
int vio_setHandler(int subtype, vio_event_handler_t *beh)
{
subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES))
return -EINVAL;
if (vio_handler[subtype] != NULL)
return -EBUSY;
vio_handler[subtype] = beh;
return 0;
}
EXPORT_SYMBOL(vio_setHandler);
int vio_clearHandler(int subtype)
{
subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES))
return -EINVAL;
if (vio_handler[subtype] == NULL)
return -EAGAIN;
vio_handler[subtype] = NULL;
return 0;
}
EXPORT_SYMBOL(vio_clearHandler);
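Handlers are registered per major subtype: callers pass the subtype already shifted into the high bits, and vio_setHandler() shifts it back down to index vio_handler[]. A small standalone model of that registration and dispatch arithmetic; the shift, mask and table size below are illustrative stand-ins for the real constants in <asm/iSeries/vio.h>:

#include <stdio.h>

#define DEMO_SUBTYPE_SHIFT	8
#define DEMO_SUBTYPE_MASK	0xff00
#define DEMO_MAX_SUBTYPES	8

typedef void demo_handler_t(const char *msg);
static demo_handler_t *demo_handler[DEMO_MAX_SUBTYPES];

static void config_handler(const char *msg)
{
	printf("config event: %s\n", msg);
}

static int demo_set_handler(int subtype, demo_handler_t *h)
{
	subtype >>= DEMO_SUBTYPE_SHIFT;		/* major subtype selects the slot */
	if (subtype < 0 || subtype >= DEMO_MAX_SUBTYPES || demo_handler[subtype])
		return -1;
	demo_handler[subtype] = h;
	return 0;
}

static void demo_dispatch(int event_subtype, const char *msg)
{
	int idx = (event_subtype & DEMO_SUBTYPE_MASK) >> DEMO_SUBTYPE_SHIFT;

	if (demo_handler[idx])
		demo_handler[idx](msg);
}

int main(void)
{
	int config_subtype = 2 << DEMO_SUBTYPE_SHIFT;	/* pretend "config" is major subtype 2 */

	demo_set_handler(config_subtype, config_handler);
	demo_dispatch(config_subtype | 0x01, "get");	/* low bits carry the minor subtype */
	return 0;
}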
static void handleConfig(struct HvLpEvent *event)
{
if (!event)
return;
if (event->xFlags.xFunction == HvLpEvent_Function_Int) {
printk(KERN_WARNING_VIO
"unexpected config request from partition %d",
event->xSourceLp);
if ((event->xFlags.xFunction == HvLpEvent_Function_Int) &&
(event->xFlags.xAckInd == HvLpEvent_AckInd_DoAck)) {
event->xRc = HvLpEvent_Rc_InvalidSubtype;
HvCallEvent_ackLpEvent(event);
}
return;
}
up((struct semaphore *)event->xCorrelationToken);
}
/*
* Initialization of the hosting partition
*/
void vio_set_hostlp(void)
{
/*
* If this has already been set then we DON'T want to either change
* it or re-register the proc file system
*/
if (viopath_hostLp != HvLpIndexInvalid)
return;
/*
* Figure out our hosting partition. This isn't allowed to change
* while we're active
*/
viopath_ourLp = HvLpConfig_getLpIndex();
viopath_hostLp = HvCallCfg_getHostingLpIndex(viopath_ourLp);
/* If we have a valid hosting LP, create a proc file system entry
* for config information
*/
if (viopath_hostLp != HvLpIndexInvalid) {
iSeries_proc_callback(&vio_proc_init);
vio_setHandler(viomajorsubtype_config, handleConfig);
}
}
EXPORT_SYMBOL(vio_set_hostlp);
static void vio_handleEvent(struct HvLpEvent *event, struct pt_regs *regs)
{
HvLpIndex remoteLp;
int subtype = (event->xSubtype & VIOMAJOR_SUBTYPE_MASK)
>> VIOMAJOR_SUBTYPE_SHIFT;
if (event->xFlags.xFunction == HvLpEvent_Function_Int) {
remoteLp = event->xSourceLp;
/*
* The isActive is checked because if the hosting partition
* went down and came back up it would not be active but it
* would have different source and target instances, in which
* case we'd want to reset them. This case really protects
* against an unauthorized active partition sending interrupts
* or acks to this linux partition.
*/
if (viopathStatus[remoteLp].isActive
&& (event->xSourceInstanceId !=
viopathStatus[remoteLp].mTargetInst)) {
printk(KERN_WARNING_VIO
"message from invalid partition. "
"int msg rcvd, source inst (%d) doesnt match (%d)\n",
viopathStatus[remoteLp].mTargetInst,
event->xSourceInstanceId);
return;
}
if (viopathStatus[remoteLp].isActive
&& (event->xTargetInstanceId !=
viopathStatus[remoteLp].mSourceInst)) {
printk(KERN_WARNING_VIO
"message from invalid partition. "
"int msg rcvd, target inst (%d) doesnt match (%d)\n",
viopathStatus[remoteLp].mSourceInst,
event->xTargetInstanceId);
return;
}
} else {
remoteLp = event->xTargetLp;
if (event->xSourceInstanceId !=
viopathStatus[remoteLp].mSourceInst) {
printk(KERN_WARNING_VIO
"message from invalid partition. "
"ack msg rcvd, source inst (%d) doesnt match (%d)\n",
viopathStatus[remoteLp].mSourceInst,
event->xSourceInstanceId);
return;
}
if (event->xTargetInstanceId !=
viopathStatus[remoteLp].mTargetInst) {
printk(KERN_WARNING_VIO
"message from invalid partition. "
"viopath: ack msg rcvd, target inst (%d) doesnt match (%d)\n",
viopathStatus[remoteLp].mTargetInst,
event->xTargetInstanceId);
return;
}
}
if (vio_handler[subtype] == NULL) {
printk(KERN_WARNING_VIO
"unexpected virtual io event subtype %d from partition %d\n",
event->xSubtype, remoteLp);
/* No handler. Ack if necessary */
if ((event->xFlags.xFunction == HvLpEvent_Function_Int) &&
(event->xFlags.xAckInd == HvLpEvent_AckInd_DoAck)) {
event->xRc = HvLpEvent_Rc_InvalidSubtype;
HvCallEvent_ackLpEvent(event);
}
return;
}
/* This innocuous little line is where all the real work happens */
(*vio_handler[subtype])(event);
}
static void viopath_donealloc(void *parm, int number)
{
struct doneAllocParms_t *parmsp = (struct doneAllocParms_t *)parm;
parmsp->number = number;
if (parmsp->used_wait_atomic)
*(parmsp->wait_atomic) = 0;
else
up(parmsp->sem);
}
static int allocateEvents(HvLpIndex remoteLp, int numEvents)
{
struct doneAllocParms_t parms;
DECLARE_MUTEX_LOCKED(Semaphore);
volatile unsigned long wait_atomic = 1;
if (in_atomic()) {
parms.used_wait_atomic = 1;
parms.wait_atomic = &wait_atomic;
} else {
parms.used_wait_atomic = 0;
parms.sem = &Semaphore;
}
/* It would be nice to put a real number here! */
mf_allocateLpEvents(remoteLp, HvLpEvent_Type_VirtualIo, 250,
		numEvents, &viopath_donealloc, &parms);
if (in_atomic()) {
while (wait_atomic)
mb();
} else
down(&Semaphore);
return parms.number;
}
int viopath_open(HvLpIndex remoteLp, int subtype, int numReq)
{
int i;
unsigned long flags;
int tempNumAllocated;
if ((remoteLp >= HvMaxArchitectedLps) || (remoteLp == HvLpIndexInvalid))
return -EINVAL;
subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES))
return -EINVAL;
spin_lock_irqsave(&statuslock, flags);
if (!event_buffer_initialised) {
for (i = 0; i < VIO_MAX_SUBTYPES; i++)
atomic_set(&event_buffer_available[i], 1);
event_buffer_initialised = 1;
}
viopathStatus[remoteLp].users[subtype]++;
if (!viopathStatus[remoteLp].isOpen) {
viopathStatus[remoteLp].isOpen = 1;
HvCallEvent_openLpEventPath(remoteLp, HvLpEvent_Type_VirtualIo);
/*
* Don't hold the spinlock during an operation that
* can sleep.
*/
spin_unlock_irqrestore(&statuslock, flags);
tempNumAllocated = allocateEvents(remoteLp, 1);
spin_lock_irqsave(&statuslock, flags);
viopathStatus[remoteLp].numberAllocated += tempNumAllocated;
if (viopathStatus[remoteLp].numberAllocated == 0) {
HvCallEvent_closeLpEventPath(remoteLp,
HvLpEvent_Type_VirtualIo);
spin_unlock_irqrestore(&statuslock, flags);
return -ENOMEM;
}
viopathStatus[remoteLp].mSourceInst =
HvCallEvent_getSourceLpInstanceId(remoteLp,
HvLpEvent_Type_VirtualIo);
viopathStatus[remoteLp].mTargetInst =
HvCallEvent_getTargetLpInstanceId(remoteLp,
HvLpEvent_Type_VirtualIo);
HvLpEvent_registerHandler(HvLpEvent_Type_VirtualIo,
&vio_handleEvent);
sendMonMsg(remoteLp);
printk(KERN_INFO_VIO
"Opening connection to partition %d, setting sinst %d, tinst %d\n",
remoteLp, viopathStatus[remoteLp].mSourceInst,
viopathStatus[remoteLp].mTargetInst);
}
spin_unlock_irqrestore(&statuslock, flags);
tempNumAllocated = allocateEvents(remoteLp, numReq);
spin_lock_irqsave(&statuslock, flags);
viopathStatus[remoteLp].numberAllocated += tempNumAllocated;
spin_unlock_irqrestore(&statuslock, flags);
return 0;
}
EXPORT_SYMBOL(viopath_open);
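/*
 * A minimal sketch of the open/close sequence a client driver would use,
 * assuming the hosting partition serves the blockio subtype; the request
 * count of 16 and the function name are illustrative only.
 */
static int example_path_setup(void)
{
	int rc;

	vio_set_hostlp();
	if (viopath_hostLp == HvLpIndexInvalid)
		return -ENODEV;

	rc = viopath_open(viopath_hostLp, viomajorsubtype_blockio, 16);
	if (rc)
		return rc;

	/* ... issue I/O over the opened path ... */

	return viopath_close(viopath_hostLp, viomajorsubtype_blockio, 16);
}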
int viopath_close(HvLpIndex remoteLp, int subtype, int numReq)
{
unsigned long flags;
int i;
int numOpen;
struct doneAllocParms_t doneAllocParms;
DECLARE_MUTEX_LOCKED(Semaphore);
if ((remoteLp >= HvMaxArchitectedLps) || (remoteLp == HvLpIndexInvalid))
return -EINVAL;
subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES))
return -EINVAL;
spin_lock_irqsave(&statuslock, flags);
/*
 * If viopath_close somehow gets called before a viopath_open,
 * the user count could decrement to -1, which is a
 * non-recoverable state, so prevent that from happening.
 */
if (viopathStatus[remoteLp].users[subtype] > 0)
viopathStatus[remoteLp].users[subtype]--;
spin_unlock_irqrestore(&statuslock, flags);
doneAllocParms.sem = &Semaphore;
mf_deallocateLpEvents(remoteLp, HvLpEvent_Type_VirtualIo,
numReq, &viopath_donealloc, &doneAllocParms);
down(&Semaphore);
spin_lock_irqsave(&statuslock, flags);
for (i = 0, numOpen = 0; i < VIO_MAX_SUBTYPES; i++)
numOpen += viopathStatus[remoteLp].users[i];
if ((viopathStatus[remoteLp].isOpen) && (numOpen == 0)) {
printk(KERN_INFO_VIO "Closing connection to partition %d",
remoteLp);
HvCallEvent_closeLpEventPath(remoteLp,
HvLpEvent_Type_VirtualIo);
viopathStatus[remoteLp].isOpen = 0;
viopathStatus[remoteLp].isActive = 0;
for (i = 0; i < VIO_MAX_SUBTYPES; i++)
atomic_set(&event_buffer_available[i], 0);
event_buffer_initialised = 0;
}
spin_unlock_irqrestore(&statuslock, flags);
return 0;
}
EXPORT_SYMBOL(viopath_close);
void *vio_get_event_buffer(int subtype)
{
subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES))
return NULL;
if (atomic_dec_if_positive(&event_buffer_available[subtype]) == 0)
return &event_buffer[subtype * 256];
else
return NULL;
}
EXPORT_SYMBOL(vio_get_event_buffer);
void vio_free_event_buffer(int subtype, void *buffer)
{
subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES)) {
printk(KERN_WARNING_VIO
"unexpected subtype %d freeing event buffer\n",
subtype);
return;
}
if (atomic_read(&event_buffer_available[subtype]) != 0) {
printk(KERN_WARNING_VIO
"freeing unallocated event buffer, subtype %d\n",
subtype);
return;
}
if (buffer != &event_buffer[subtype * 256]) {
printk(KERN_WARNING_VIO
"freeing invalid event buffer, subtype %d\n",
subtype);
}
atomic_set(&event_buffer_available[subtype], 1);
}
EXPORT_SYMBOL(vio_free_event_buffer);
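/*
 * A minimal sketch of borrowing the single per-subtype event buffer
 * around an outgoing event; the subtype and function name are
 * illustrative only.
 */
static void example_use_event_buffer(void)
{
	void *buf = vio_get_event_buffer(viomajorsubtype_blockio);

	if (buf == NULL)
		return;	/* buffer is already in use */

	/* ... fill buf and signal an event that references it ... */

	vio_free_event_buffer(viomajorsubtype_blockio, buf);
}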
static const struct vio_error_entry vio_no_error =
{ 0, 0, "Non-VIO Error" };
static const struct vio_error_entry vio_unknown_error =
{ 0, EIO, "Unknown Error" };
static const struct vio_error_entry vio_default_errors[] = {
{0x0001, EIO, "No Connection"},
{0x0002, EIO, "No Receiver"},
{0x0003, EIO, "No Buffer Available"},
{0x0004, EBADRQC, "Invalid Message Type"},
{0x0000, 0, NULL},
};
const struct vio_error_entry *vio_lookup_rc(
const struct vio_error_entry *local_table, u16 rc)
{
const struct vio_error_entry *cur;
if (!rc)
return &vio_no_error;
if (local_table)
for (cur = local_table; cur->rc; ++cur)
if (cur->rc == rc)
return cur;
for (cur = vio_default_errors; cur->rc; ++cur)
if (cur->rc == rc)
return cur;
return &vio_unknown_error;
}
EXPORT_SYMBOL(vio_lookup_rc);
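/*
 * A minimal sketch of a driver-local error table and lookup, following
 * the convention above: the table is terminated by { 0, 0, NULL } and is
 * searched before the default table.  Table contents and the function
 * name are illustrative only.
 */
static const struct vio_error_entry example_errors[] = {
	{0x0201, EIO, "Invalid Range"},
	{0x0202, EIO, "Invalid Token"},
	{0x0000, 0, NULL},
};

static void example_report_rc(u16 rc)
{
	const struct vio_error_entry *err = vio_lookup_rc(example_errors, rc);

	printk(KERN_WARNING_VIO "request failed, rc %d: %s\n", rc, err->msg);
}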
...@@ -23,25 +23,16 @@ ...@@ -23,25 +23,16 @@
// drive the hypervisor from the OS. // drive the hypervisor from the OS.
// //
//=========================================================================== //===========================================================================
#ifndef _HVCALL_H
#define _HVCALL_H
//------------------------------------------------------------------- //-------------------------------------------------------------------
// Standard Includes // Standard Includes
//------------------------------------------------------------------- //-------------------------------------------------------------------
#ifndef _HVCALLSC_H #include <asm/iSeries/HvCallSc.h>
#include "HvCallSc.h"
#endif
#ifndef _HVTYPES_H
#include <asm/iSeries/HvTypes.h> #include <asm/iSeries/HvTypes.h>
#endif
#include <asm/paca.h> #include <asm/paca.h>
//-------------------------------------------------------------------
// Constants
//-------------------------------------------------------------------
#ifndef _HVCALL_H
#define _HVCALL_H
/* /*
enum HvCall_ReturnCode enum HvCall_ReturnCode
{ {
...@@ -211,5 +202,4 @@ static inline void HvCall_setDebugBus(unsigned long val) ...@@ -211,5 +202,4 @@ static inline void HvCall_setDebugBus(unsigned long val)
HvCall1(HvCallBaseSetDebugBus, val); HvCall1(HvCallBaseSetDebugBus, val);
} }
#endif // _HVCALL_H #endif /* _HVCALL_H */
...@@ -23,23 +23,18 @@ ...@@ -23,23 +23,18 @@
// drive the hypervisor from the OS. // drive the hypervisor from the OS.
// //
//===================================================================================== //=====================================================================================
#ifndef _HVCALLCFG_H
#define _HVCALLCFG_H
//------------------------------------------------------------------- //-------------------------------------------------------------------
// Standard Includes // Standard Includes
//------------------------------------------------------------------- //-------------------------------------------------------------------
#ifndef _HVCALLSC_H #include <asm/iSeries/HvCallSc.h>
#include "HvCallSc.h"
#endif
#ifndef _HVTYPES_H
#include <asm/iSeries/HvTypes.h> #include <asm/iSeries/HvTypes.h>
#endif
//------------------------------------------------------------------------------------- //-------------------------------------------------------------------------------------
// Constants // Constants
//------------------------------------------------------------------------------------- //-------------------------------------------------------------------------------------
#ifndef _HVCALLCFG_H
#define _HVCALLCFG_H
enum HvCallCfg_ReqQual enum HvCallCfg_ReqQual
{ {
...@@ -215,5 +210,4 @@ static inline HvLpIndex HvCallCfg_getHostingLpIndex(HvLpIndex lp) ...@@ -215,5 +210,4 @@ static inline HvLpIndex HvCallCfg_getHostingLpIndex(HvLpIndex lp)
} }
#endif // _HVCALLCFG_H #endif /* _HVCALLCFG_H */
...@@ -17,44 +17,27 @@ ...@@ -17,44 +17,27 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/ */
//================================================================== /*
// * This file contains the "hypervisor call" interface which is used to
// This file contains the "hypervisor call" interface which is used to * drive the hypervisor from the OS.
// drive the hypervisor from the OS. */
// #ifndef _HVCALLEVENT_H
//================================================================== #define _HVCALLEVENT_H
//-------------------------------------------------------------------
// Standard Includes
//-------------------------------------------------------------------
#ifndef _HVCALLSC_H
#include <asm/iSeries/HvCallSc.h>
#endif
#ifndef _HVTYPES_H /*
* Standard Includes
*/
#include <asm/iSeries/HvCallSc.h>
#include <asm/iSeries/HvTypes.h> #include <asm/iSeries/HvTypes.h>
#endif
#include <asm/abs_addr.h> #include <asm/abs_addr.h>
//-------------------------------------------------------------------
// Other Includes
//-------------------------------------------------------------------
//-------------------------------------------------------------------
// Constants
//-------------------------------------------------------------------
#ifndef _HVCALLEVENT_H
#define _HVCALLEVENT_H
struct HvLpEvent; struct HvLpEvent;
typedef u8 HvLpEvent_Type; typedef u8 HvLpEvent_Type;
typedef u8 HvLpEvent_AckInd; typedef u8 HvLpEvent_AckInd;
typedef u8 HvLpEvent_AckType; typedef u8 HvLpEvent_AckType;
struct HvCallEvent_PackedParms struct HvCallEvent_PackedParms {
{
u8 xAckType:1; u8 xAckType:1;
u8 xAckInd:1; u8 xAckInd:1;
u8 xRsvd:1; u8 xRsvd:1;
...@@ -68,8 +51,7 @@ struct HvCallEvent_PackedParms ...@@ -68,8 +51,7 @@ struct HvCallEvent_PackedParms
typedef u8 HvLpDma_Direction; typedef u8 HvLpDma_Direction;
typedef u8 HvLpDma_AddressType; typedef u8 HvLpDma_AddressType;
struct HvCallEvent_PackedDmaParms struct HvCallEvent_PackedDmaParms {
{
u8 xDirection:1; u8 xDirection:1;
u8 xLocalAddrType:1; u8 xLocalAddrType:1;
u8 xRemoteAddrType:1; u8 xRemoteAddrType:1;
...@@ -101,69 +83,63 @@ typedef u64 HvLpDma_Rc; ...@@ -101,69 +83,63 @@ typedef u64 HvLpDma_Rc;
#define HvCallEventSetLpEventQueueInterruptProc HvCallEvent + 14 #define HvCallEventSetLpEventQueueInterruptProc HvCallEvent + 14
#define HvCallEventRouter15 HvCallEvent + 15 #define HvCallEventRouter15 HvCallEvent + 15
//====================================================================== static inline void HvCallEvent_getOverflowLpEvents(u8 queueIndex)
static inline void HvCallEvent_getOverflowLpEvents(u8 queueIndex)
{ {
HvCall1(HvCallEventGetOverflowLpEvents,queueIndex); HvCall1(HvCallEventGetOverflowLpEvents,queueIndex);
// getPaca()->adjustHmtForNoOfSpinLocksHeld(); // getPaca()->adjustHmtForNoOfSpinLocksHeld();
} }
//======================================================================
static inline void HvCallEvent_setInterLpQueueIndex(u8 queueIndex) static inline void HvCallEvent_setInterLpQueueIndex(u8 queueIndex)
{ {
HvCall1(HvCallEventSetInterLpQueueIndex,queueIndex); HvCall1(HvCallEventSetInterLpQueueIndex,queueIndex);
// getPaca()->adjustHmtForNoOfSpinLocksHeld(); // getPaca()->adjustHmtForNoOfSpinLocksHeld();
} }
//======================================================================
static inline void HvCallEvent_setLpEventStack(u8 queueIndex, static inline void HvCallEvent_setLpEventStack(u8 queueIndex,
char * eventStackAddr, char *eventStackAddr, u32 eventStackSize)
u32 eventStackSize)
{ {
u64 abs_addr; u64 abs_addr;
abs_addr = virt_to_absolute( (unsigned long) eventStackAddr );
HvCall3(HvCallEventSetLpEventStack, queueIndex, abs_addr, eventStackSize); abs_addr = virt_to_absolute((unsigned long)eventStackAddr);
HvCall3(HvCallEventSetLpEventStack, queueIndex, abs_addr,
eventStackSize);
// getPaca()->adjustHmtForNoOfSpinLocksHeld(); // getPaca()->adjustHmtForNoOfSpinLocksHeld();
} }
//======================================================================
static inline void HvCallEvent_setLpEventQueueInterruptProc(u8 queueIndex, static inline void HvCallEvent_setLpEventQueueInterruptProc(u8 queueIndex,
u16 lpLogicalProcIndex) u16 lpLogicalProcIndex)
{ {
HvCall2(HvCallEventSetLpEventQueueInterruptProc,queueIndex,lpLogicalProcIndex); HvCall2(HvCallEventSetLpEventQueueInterruptProc, queueIndex,
lpLogicalProcIndex);
// getPaca()->adjustHmtForNoOfSpinLocksHeld(); // getPaca()->adjustHmtForNoOfSpinLocksHeld();
} }
//=====================================================================
static inline HvLpEvent_Rc HvCallEvent_signalLpEvent(struct HvLpEvent* event) static inline HvLpEvent_Rc HvCallEvent_signalLpEvent(struct HvLpEvent *event)
{ {
u64 abs_addr; u64 abs_addr;
HvLpEvent_Rc retVal; HvLpEvent_Rc retVal;
#ifdef DEBUG_SENDEVENT #ifdef DEBUG_SENDEVENT
printk("HvCallEvent_signalLpEvent: *event = %016lx\n ", (unsigned long)event); printk("HvCallEvent_signalLpEvent: *event = %016lx\n ",
(unsigned long)event);
#endif #endif
abs_addr = virt_to_absolute( (unsigned long) event ); abs_addr = virt_to_absolute((unsigned long)event);
retVal = (HvLpEvent_Rc)HvCall1(HvCallEventSignalLpEvent, abs_addr); retVal = (HvLpEvent_Rc)HvCall1(HvCallEventSignalLpEvent, abs_addr);
// getPaca()->adjustHmtForNoOfSpinLocksHeld(); // getPaca()->adjustHmtForNoOfSpinLocksHeld();
return retVal; return retVal;
} }
//=====================================================================
static inline HvLpEvent_Rc HvCallEvent_signalLpEventFast(HvLpIndex targetLp, static inline HvLpEvent_Rc HvCallEvent_signalLpEventFast(HvLpIndex targetLp,
HvLpEvent_Type type, HvLpEvent_Type type, u16 subtype, HvLpEvent_AckInd ackInd,
u16 subtype, HvLpEvent_AckType ackType, HvLpInstanceId sourceInstanceId,
HvLpEvent_AckInd ackInd, HvLpInstanceId targetInstanceId, u64 correlationToken,
HvLpEvent_AckType ackType, u64 eventData1, u64 eventData2, u64 eventData3,
HvLpInstanceId sourceInstanceId, u64 eventData4, u64 eventData5)
HvLpInstanceId targetInstanceId,
u64 correlationToken,
u64 eventData1,
u64 eventData2,
u64 eventData3,
u64 eventData4,
u64 eventData5)
{ {
HvLpEvent_Rc retVal; HvLpEvent_Rc retVal;
// Pack the misc bits into a single Dword to pass to PLIC // Pack the misc bits into a single Dword to pass to PLIC
union union {
{
struct HvCallEvent_PackedParms parms; struct HvCallEvent_PackedParms parms;
u64 dword; u64 dword;
} packed; } packed;
...@@ -177,88 +153,84 @@ static inline HvLpEvent_Rc HvCallEvent_signalLpEventFast(HvLpIndex targetLp, ...@@ -177,88 +153,84 @@ static inline HvLpEvent_Rc HvCallEvent_signalLpEventFast(HvLpIndex targetLp,
packed.parms.xTargetInstId = targetInstanceId; packed.parms.xTargetInstId = targetInstanceId;
retVal = (HvLpEvent_Rc)HvCall7(HvCallEventSignalLpEventParms, retVal = (HvLpEvent_Rc)HvCall7(HvCallEventSignalLpEventParms,
packed.dword, packed.dword, correlationToken, eventData1,eventData2,
correlationToken, eventData3,eventData4, eventData5);
eventData1,eventData2,
eventData3,eventData4,
eventData5);
// getPaca()->adjustHmtForNoOfSpinLocksHeld(); // getPaca()->adjustHmtForNoOfSpinLocksHeld();
return retVal; return retVal;
} }
//====================================================================
static inline HvLpEvent_Rc HvCallEvent_ackLpEvent(struct HvLpEvent* event) static inline HvLpEvent_Rc HvCallEvent_ackLpEvent(struct HvLpEvent *event)
{ {
u64 abs_addr; u64 abs_addr;
HvLpEvent_Rc retVal; HvLpEvent_Rc retVal;
abs_addr = virt_to_absolute( (unsigned long) event );
abs_addr = virt_to_absolute((unsigned long)event);
retVal = (HvLpEvent_Rc)HvCall1(HvCallEventAckLpEvent, abs_addr); retVal = (HvLpEvent_Rc)HvCall1(HvCallEventAckLpEvent, abs_addr);
// getPaca()->adjustHmtForNoOfSpinLocksHeld(); // getPaca()->adjustHmtForNoOfSpinLocksHeld();
return retVal; return retVal;
} }
//====================================================================
static inline HvLpEvent_Rc HvCallEvent_cancelLpEvent(struct HvLpEvent* event) static inline HvLpEvent_Rc HvCallEvent_cancelLpEvent(struct HvLpEvent *event)
{ {
u64 abs_addr; u64 abs_addr;
HvLpEvent_Rc retVal; HvLpEvent_Rc retVal;
abs_addr = virt_to_absolute( (unsigned long) event );
abs_addr = virt_to_absolute((unsigned long)event);
retVal = (HvLpEvent_Rc)HvCall1(HvCallEventCancelLpEvent, abs_addr); retVal = (HvLpEvent_Rc)HvCall1(HvCallEventCancelLpEvent, abs_addr);
// getPaca()->adjustHmtForNoOfSpinLocksHeld(); // getPaca()->adjustHmtForNoOfSpinLocksHeld();
return retVal; return retVal;
} }
//===================================================================
static inline HvLpInstanceId HvCallEvent_getSourceLpInstanceId(HvLpIndex targetLp, HvLpEvent_Type type) static inline HvLpInstanceId HvCallEvent_getSourceLpInstanceId(
HvLpIndex targetLp, HvLpEvent_Type type)
{ {
HvLpInstanceId retVal; HvLpInstanceId retVal;
retVal = HvCall2(HvCallEventGetSourceLpInstanceId,targetLp,type);
retVal = HvCall2(HvCallEventGetSourceLpInstanceId, targetLp, type);
// getPaca()->adjustHmtForNoOfSpinLocksHeld(); // getPaca()->adjustHmtForNoOfSpinLocksHeld();
return retVal; return retVal;
} }
//===================================================================
static inline HvLpInstanceId HvCallEvent_getTargetLpInstanceId(HvLpIndex targetLp, HvLpEvent_Type type) static inline HvLpInstanceId HvCallEvent_getTargetLpInstanceId(
HvLpIndex targetLp, HvLpEvent_Type type)
{ {
HvLpInstanceId retVal; HvLpInstanceId retVal;
retVal = HvCall2(HvCallEventGetTargetLpInstanceId,targetLp,type);
retVal = HvCall2(HvCallEventGetTargetLpInstanceId, targetLp, type);
// getPaca()->adjustHmtForNoOfSpinLocksHeld(); // getPaca()->adjustHmtForNoOfSpinLocksHeld();
return retVal; return retVal;
} }
//===================================================================
static inline void HvCallEvent_openLpEventPath(HvLpIndex targetLp, static inline void HvCallEvent_openLpEventPath(HvLpIndex targetLp,
HvLpEvent_Type type) HvLpEvent_Type type)
{ {
HvCall2(HvCallEventOpenLpEventPath,targetLp,type); HvCall2(HvCallEventOpenLpEventPath, targetLp, type);
// getPaca()->adjustHmtForNoOfSpinLocksHeld(); // getPaca()->adjustHmtForNoOfSpinLocksHeld();
} }
//===================================================================
static inline void HvCallEvent_closeLpEventPath(HvLpIndex targetLp, static inline void HvCallEvent_closeLpEventPath(HvLpIndex targetLp,
HvLpEvent_Type type) HvLpEvent_Type type)
{ {
HvCall2(HvCallEventCloseLpEventPath,targetLp,type); HvCall2(HvCallEventCloseLpEventPath, targetLp, type);
// getPaca()->adjustHmtForNoOfSpinLocksHeld(); // getPaca()->adjustHmtForNoOfSpinLocksHeld();
} }
//===================================================================
static inline HvLpDma_Rc HvCallEvent_dmaBufList(HvLpEvent_Type type, static inline HvLpDma_Rc HvCallEvent_dmaBufList(HvLpEvent_Type type,
HvLpIndex remoteLp, HvLpIndex remoteLp, HvLpDma_Direction direction,
HvLpDma_Direction direction, HvLpInstanceId localInstanceId,
HvLpInstanceId localInstanceId, HvLpInstanceId remoteInstanceId,
HvLpInstanceId remoteInstanceId, HvLpDma_AddressType localAddressType,
HvLpDma_AddressType localAddressType, HvLpDma_AddressType remoteAddressType,
HvLpDma_AddressType remoteAddressType, /* Do these need to be converted to absolute addresses? */
// Do these need to be converted to u64 localBufList, u64 remoteBufList, u32 transferLength)
// absolute addresses?
u64 localBufList,
u64 remoteBufList,
u32 transferLength)
{ {
HvLpDma_Rc retVal; HvLpDma_Rc retVal;
// Pack the misc bits into a single Dword to pass to PLIC // Pack the misc bits into a single Dword to pass to PLIC
union union {
{
struct HvCallEvent_PackedDmaParms parms; struct HvCallEvent_PackedDmaParms parms;
u64 dword; u64 dword;
} packed; } packed;
packed.parms.xDirection = direction; packed.parms.xDirection = direction;
packed.parms.xLocalAddrType = localAddressType; packed.parms.xLocalAddrType = localAddressType;
packed.parms.xRemoteAddrType = remoteAddressType; packed.parms.xRemoteAddrType = remoteAddressType;
...@@ -270,32 +242,27 @@ static inline HvLpDma_Rc HvCallEvent_dmaBufList(HvLpEvent_Type type, ...@@ -270,32 +242,27 @@ static inline HvLpDma_Rc HvCallEvent_dmaBufList(HvLpEvent_Type type,
packed.parms.xRemoteInstId = remoteInstanceId; packed.parms.xRemoteInstId = remoteInstanceId;
retVal = (HvLpDma_Rc)HvCall4(HvCallEventDmaBufList, retVal = (HvLpDma_Rc)HvCall4(HvCallEventDmaBufList,
packed.dword, packed.dword, localBufList, remoteBufList,
localBufList, transferLength);
remoteBufList,
transferLength);
// getPaca()->adjustHmtForNoOfSpinLocksHeld(); // getPaca()->adjustHmtForNoOfSpinLocksHeld();
return retVal; return retVal;
} }
//=================================================================
static inline HvLpDma_Rc HvCallEvent_dmaSingle(HvLpEvent_Type type, static inline HvLpDma_Rc HvCallEvent_dmaSingle(HvLpEvent_Type type,
HvLpIndex remoteLp, HvLpIndex remoteLp, HvLpDma_Direction direction,
HvLpDma_Direction direction, HvLpInstanceId localInstanceId,
HvLpInstanceId localInstanceId, HvLpInstanceId remoteInstanceId,
HvLpInstanceId remoteInstanceId, HvLpDma_AddressType localAddressType,
HvLpDma_AddressType localAddressType, HvLpDma_AddressType remoteAddressType,
HvLpDma_AddressType remoteAddressType, u64 localAddrOrTce, u64 remoteAddrOrTce, u32 transferLength)
u64 localAddrOrTce,
u64 remoteAddrOrTce,
u32 transferLength)
{ {
HvLpDma_Rc retVal; HvLpDma_Rc retVal;
// Pack the misc bits into a single Dword to pass to PLIC // Pack the misc bits into a single Dword to pass to PLIC
union union {
{
struct HvCallEvent_PackedDmaParms parms; struct HvCallEvent_PackedDmaParms parms;
u64 dword; u64 dword;
} packed; } packed;
packed.parms.xDirection = direction; packed.parms.xDirection = direction;
packed.parms.xLocalAddrType = localAddressType; packed.parms.xLocalAddrType = localAddressType;
packed.parms.xRemoteAddrType = remoteAddressType; packed.parms.xRemoteAddrType = remoteAddressType;
...@@ -307,29 +274,24 @@ static inline HvLpDma_Rc HvCallEvent_dmaSingle(HvLpEvent_Type type, ...@@ -307,29 +274,24 @@ static inline HvLpDma_Rc HvCallEvent_dmaSingle(HvLpEvent_Type type,
packed.parms.xRemoteInstId = remoteInstanceId; packed.parms.xRemoteInstId = remoteInstanceId;
retVal = (HvLpDma_Rc)HvCall4(HvCallEventDmaSingle, retVal = (HvLpDma_Rc)HvCall4(HvCallEventDmaSingle,
packed.dword, packed.dword, localAddrOrTce, remoteAddrOrTce,
localAddrOrTce, transferLength);
remoteAddrOrTce,
transferLength);
// getPaca()->adjustHmtForNoOfSpinLocksHeld(); // getPaca()->adjustHmtForNoOfSpinLocksHeld();
return retVal; return retVal;
} }
//=================================================================
static inline HvLpDma_Rc HvCallEvent_dmaToSp(void* local, u32 remote, u32 length, HvLpDma_Direction dir) static inline HvLpDma_Rc HvCallEvent_dmaToSp(void* local, u32 remote,
u32 length, HvLpDma_Direction dir)
{ {
u64 abs_addr; u64 abs_addr;
HvLpDma_Rc retVal; HvLpDma_Rc retVal;
abs_addr = virt_to_absolute( (unsigned long) local );
abs_addr = virt_to_absolute((unsigned long)local);
retVal = (HvLpDma_Rc)HvCall4(HvCallEventDmaToSp, retVal = (HvLpDma_Rc)HvCall4(HvCallEventDmaToSp, abs_addr, remote,
abs_addr, length, dir);
remote,
length,
dir);
// getPaca()->adjustHmtForNoOfSpinLocksHeld(); // getPaca()->adjustHmtForNoOfSpinLocksHeld();
return retVal; return retVal;
} }
//================================================================
#endif // _HVCALLEVENT_H
#endif /* _HVCALLEVENT_H */
...@@ -16,6 +16,8 @@ ...@@ -16,6 +16,8 @@
* along with this program; if not, write to the Free Software * along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/ */
#ifndef _HVCALLHPT_H
#define _HVCALLHPT_H
//============================================================================ //============================================================================
// //
...@@ -24,30 +26,13 @@ ...@@ -24,30 +26,13 @@
// //
//============================================================================ //============================================================================
//------------------------------------------------------------------- #include <asm/iSeries/HvCallSc.h>
// Standard Includes
//-------------------------------------------------------------------
#ifndef _HVCALLSC_H
#include "HvCallSc.h"
#endif
#ifndef _HVTYPES_H
#include <asm/iSeries/HvTypes.h> #include <asm/iSeries/HvTypes.h>
#endif
//-------------------------------------------------------------------
// Other Includes
//-------------------------------------------------------------------
#ifndef _PPC_MMU_H
#include <asm/mmu.h> #include <asm/mmu.h>
#endif
//----------------------------------------------------------------------------- //-----------------------------------------------------------------------------
// Constants // Constants
//----------------------------------------------------------------------------- //-----------------------------------------------------------------------------
#ifndef _HVCALLHPT_H
#define _HVCALLHPT_H
#define HvCallHptGetHptAddress HvCallHpt + 0 #define HvCallHptGetHptAddress HvCallHpt + 0
#define HvCallHptGetHptPages HvCallHpt + 1 #define HvCallHptGetHptPages HvCallHpt + 1
...@@ -139,5 +124,4 @@ static inline void HvCallHpt_addValidate( u32 hpteIndex, ...@@ -139,5 +124,4 @@ static inline void HvCallHpt_addValidate( u32 hpteIndex,
//============================================================================= //=============================================================================
#endif // _HVCALLHPT_H #endif /* _HVCALLHPT_H */
...@@ -31,6 +31,8 @@ ...@@ -31,6 +31,8 @@
// drive the hypervisor from SLIC. // drive the hypervisor from SLIC.
// //
//============================================================================ //============================================================================
#ifndef _HVCALLPCI_H
#define _HVCALLPCI_H
//------------------------------------------------------------------- //-------------------------------------------------------------------
// Forward declarations // Forward declarations
...@@ -39,24 +41,12 @@ ...@@ -39,24 +41,12 @@
//------------------------------------------------------------------- //-------------------------------------------------------------------
// Standard Includes // Standard Includes
//------------------------------------------------------------------- //-------------------------------------------------------------------
#ifndef _HVCALLSC_H #include <asm/iSeries/HvCallSc.h>
#include "HvCallSc.h"
#endif
#ifndef _HVTYPES_H
#include <asm/iSeries/HvTypes.h> #include <asm/iSeries/HvTypes.h>
#endif
//-------------------------------------------------------------------
// Other Includes
//-------------------------------------------------------------------
//----------------------------------------------------------------------------- //-----------------------------------------------------------------------------
// Constants // Constants
//----------------------------------------------------------------------------- //-----------------------------------------------------------------------------
#ifndef _HVCALLPCI_H
#define _HVCALLPCI_H
struct HvCallPci_DsaAddr { // make sure this struct size is 64-bits total struct HvCallPci_DsaAddr { // make sure this struct size is 64-bits total
u16 busNumber; u16 busNumber;
...@@ -694,4 +684,4 @@ static inline int HvCallPci_getBusAdapterVpd(u16 busNumParm, u64 destParm, u16 s ...@@ -694,4 +684,4 @@ static inline int HvCallPci_getBusAdapterVpd(u16 busNumParm, u64 destParm, u16 s
return xRetSize; return xRetSize;
} }
//============================================================================ //============================================================================
#endif // _HVCALLPCI_H #endif /* _HVCALLPCI_H */
...@@ -16,14 +16,11 @@ ...@@ -16,14 +16,11 @@
* along with this program; if not, write to the Free Software * along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/ */
#ifndef _HVTYPES_H
#include <asm/iSeries/HvTypes.h>
#endif
#ifndef _HVCALLSC_H #ifndef _HVCALLSC_H
#define _HVCALLSC_H #define _HVCALLSC_H
#include <asm/iSeries/HvTypes.h>
#define HvCallBase 0x8000000000000000 #define HvCallBase 0x8000000000000000
#define HvCallCc 0x8001000000000000 #define HvCallCc 0x8001000000000000
#define HvCallCfg 0x8002000000000000 #define HvCallCfg 0x8002000000000000
......
...@@ -16,6 +16,8 @@ ...@@ -16,6 +16,8 @@
* along with this program; if not, write to the Free Software * along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/ */
#ifndef _HVCALLSM_H
#define _HVCALLSM_H
//============================================================================ //============================================================================
// //
...@@ -27,19 +29,12 @@ ...@@ -27,19 +29,12 @@
//------------------------------------------------------------------- //-------------------------------------------------------------------
// Standard Includes // Standard Includes
//------------------------------------------------------------------- //-------------------------------------------------------------------
#ifndef _HVCALLSC_H #include <asm/iSeries/HvCallSc.h>
#include "HvCallSc.h"
#endif
#ifndef _HVTYPES_H
#include <asm/iSeries/HvTypes.h> #include <asm/iSeries/HvTypes.h>
#endif
//----------------------------------------------------------------------------- //-----------------------------------------------------------------------------
// Constants // Constants
//----------------------------------------------------------------------------- //-----------------------------------------------------------------------------
#ifndef _HVCALLSM_H
#define _HVCALLSM_H
#define HvCallSmGet64BitsOfAccessMap HvCallSm + 11 #define HvCallSmGet64BitsOfAccessMap HvCallSm + 11
...@@ -54,5 +49,4 @@ static inline u64 HvCallSm_get64BitsOfAccessMap( ...@@ -54,5 +49,4 @@ static inline u64 HvCallSm_get64BitsOfAccessMap(
return retval; return retval;
} }
//============================================================================ //============================================================================
#endif // _HVCALLSM_H #endif /* _HVCALLSM_H */
...@@ -8,6 +8,8 @@ ...@@ -8,6 +8,8 @@
// drive the hypervisor from SLIC. // drive the hypervisor from SLIC.
// //
//============================================================================ //============================================================================
#ifndef _HVCALLXM_H
#define _HVCALLXM_H
//------------------------------------------------------------------- //-------------------------------------------------------------------
// Forward declarations // Forward declarations
...@@ -16,24 +18,12 @@ ...@@ -16,24 +18,12 @@
//------------------------------------------------------------------- //-------------------------------------------------------------------
// Standard Includes // Standard Includes
//------------------------------------------------------------------- //-------------------------------------------------------------------
#ifndef _HVCALLSC_H #include <asm/iSeries/HvCallSc.h>
#include "HvCallSc.h"
#endif
#ifndef _HVTYPES_H
#include <asm/iSeries/HvTypes.h> #include <asm/iSeries/HvTypes.h>
#endif
//-------------------------------------------------------------------
// Other Includes
//-------------------------------------------------------------------
//----------------------------------------------------------------------------- //-----------------------------------------------------------------------------
// Constants // Constants
//----------------------------------------------------------------------------- //-----------------------------------------------------------------------------
#ifndef _HVCALLXM_H
#define _HVCALLXM_H
#define HvCallXmGetTceTableParms HvCallXm + 0 #define HvCallXmGetTceTableParms HvCallXm + 0
#define HvCallXmTestBus HvCallXm + 1 #define HvCallXmTestBus HvCallXm + 1
...@@ -102,5 +92,4 @@ static inline u64 HvCallXm_loadTod(void) ...@@ -102,5 +92,4 @@ static inline u64 HvCallXm_loadTod(void)
} }
//===================================================================================== //=====================================================================================
#endif // _HVCALLXM_H #endif /* _HVCALLXM_H */
...@@ -16,6 +16,8 @@ ...@@ -16,6 +16,8 @@
* along with this program; if not, write to the Free Software * along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/ */
#ifndef _HVLPCONFIG_H
#define _HVLPCONFIG_H
//=========================================================================== //===========================================================================
// //
...@@ -24,24 +26,10 @@ ...@@ -24,24 +26,10 @@
// //
//=========================================================================== //===========================================================================
#ifndef _HVCALLCFG_H #include <asm/iSeries/HvCallCfg.h>
#include "HvCallCfg.h"
#endif
#ifndef _HVTYPES_H
#include <asm/iSeries/HvTypes.h> #include <asm/iSeries/HvTypes.h>
#endif
#ifndef _ITLPNACA_H
#include <asm/iSeries/ItLpNaca.h> #include <asm/iSeries/ItLpNaca.h>
#endif
#ifndef _LPARDATA_H
#include <asm/iSeries/LparData.h> #include <asm/iSeries/LparData.h>
#endif
#ifndef _HVLPCONFIG_H
#define _HVLPCONFIG_H
//------------------------------------------------------------------- //-------------------------------------------------------------------
// Constants // Constants
...@@ -289,4 +277,4 @@ static inline HvLpIndex HvLpConfig_getHostingLpIndex(HvLpIndex lp) ...@@ -289,4 +277,4 @@ static inline HvLpIndex HvLpConfig_getHostingLpIndex(HvLpIndex lp)
} }
//================================================================ //================================================================
#endif // _HVLPCONFIG_H #endif /* _HVLPCONFIG_H */
...@@ -28,10 +28,7 @@ ...@@ -28,10 +28,7 @@
#include <asm/types.h> #include <asm/types.h>
#include <asm/ptrace.h> #include <asm/ptrace.h>
#include <asm/iSeries/HvTypes.h> #include <asm/iSeries/HvTypes.h>
#ifndef _HVCALLEVENT_H
#include <asm/iSeries/HvCallEvent.h> #include <asm/iSeries/HvCallEvent.h>
#endif
//===================================================================== //=====================================================================
// //
......
...@@ -16,6 +16,8 @@ ...@@ -16,6 +16,8 @@
* along with this program; if not, write to the Free Software * along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/ */
#ifndef _HVRELEASEDATA_H
#define _HVRELEASEDATA_H
//============================================================================= //=============================================================================
// //
...@@ -23,15 +25,7 @@ ...@@ -23,15 +25,7 @@
// release so that it can be changed in the future (ie, the virtual // release so that it can be changed in the future (ie, the virtual
// address of the OS's NACA). // address of the OS's NACA).
// //
//----------------------------------------------------------------------------- #include <asm/types.h>
// Standard Includes
//-----------------------------------------------------------------------------
#ifndef _PPC64_TYPES_H
#include <asm/types.h>
#endif
#ifndef _HVRELEASEDATA_H
#define _HVRELEASEDATA_H
//============================================================================= //=============================================================================
// //
...@@ -67,4 +61,4 @@ struct HvReleaseData ...@@ -67,4 +61,4 @@ struct HvReleaseData
char xRsvd3[20]; // Reserved x2C-x3F char xRsvd3[20]; // Reserved x2C-x3F
}; };
#endif // _HVRELEASEDATA_H #endif /* _HVRELEASEDATA_H */
...@@ -16,6 +16,8 @@ ...@@ -16,6 +16,8 @@
* along with this program; if not, write to the Free Software * along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/ */
#ifndef _HVTYPES_H
#define _HVTYPES_H
//=========================================================================== //===========================================================================
// Header File Id // Header File Id
...@@ -29,13 +31,7 @@ ...@@ -29,13 +31,7 @@
// //
//=========================================================================== //===========================================================================
#ifndef _PPC_TYPES_H #include <asm/types.h>
#include <asm/types.h>
#endif
#ifndef _HVTYPES_H
#define _HVTYPES_H
//------------------------------------------------------------------- //-------------------------------------------------------------------
// Typedefs // Typedefs
...@@ -124,4 +120,4 @@ struct HvLpBufferList { ...@@ -124,4 +120,4 @@ struct HvLpBufferList {
u64 len; u64 len;
}; };
#endif // _HVTYPES_H #endif /* _HVTYPES_H */
...@@ -16,18 +16,15 @@ ...@@ -16,18 +16,15 @@
* along with this program; if not, write to the Free Software * along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/ */
#ifndef _IOHRIPROCESSORVPD_H
#define _IOHRIPROCESSORVPD_H
//=================================================================== //===================================================================
// //
// This struct maps Processor Vpd that is DMAd to SLIC by CSP // This struct maps Processor Vpd that is DMAd to SLIC by CSP
// //
#ifndef _TYPES_H
#include <asm/types.h> #include <asm/types.h>
#endif
#ifndef _IOHRIPROCESSORVPD_H
#define _IOHRIPROCESSORVPD_H
struct IoHriProcessorVpd struct IoHriProcessorVpd
{ {
...@@ -87,4 +84,5 @@ struct IoHriProcessorVpd ...@@ -87,4 +84,5 @@ struct IoHriProcessorVpd
char xProcSrc[72]; // CSP format SRC xB8-xFF char xProcSrc[72]; // CSP format SRC xB8-xFF
}; };
#endif // _IOHRIPROCESSORVPD_H
#endif /* _IOHRIPROCESSORVPD_H */
...@@ -16,6 +16,8 @@ ...@@ -16,6 +16,8 @@
* along with this program; if not, write to the Free Software * along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/ */
#ifndef _ITEXTVPDPANEL_H
#define _ITEXTVPDPANEL_H
/* /*
* *
...@@ -31,12 +33,8 @@ ...@@ -31,12 +33,8 @@
* Standard Includes * Standard Includes
*------------------------------------------------------------------- *-------------------------------------------------------------------
*/ */
#ifndef _PPC_TYPES_H #include <asm/types.h>
#include <asm/types.h>
#endif
#ifndef _ITEXTVPDPANEL_H
#define _ITEXTVPDPANEL_H
struct ItExtVpdPanel struct ItExtVpdPanel
{ {
// Definition of the Extended Vpd On Panel Data Area // Definition of the Extended Vpd On Panel Data Area
......
...@@ -16,6 +16,8 @@ ...@@ -16,6 +16,8 @@
* along with this program; if not, write to the Free Software * along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/ */
#ifndef _ITIPLPARMSREAL_H
#define _ITIPLPARMSREAL_H
//============================================================================== //==============================================================================
// //
...@@ -31,12 +33,7 @@ ...@@ -31,12 +33,7 @@
//------------------------------------------------------------------- //-------------------------------------------------------------------
// Standard Includes // Standard Includes
//------------------------------------------------------------------- //-------------------------------------------------------------------
#ifndef _PPC_TYPES_H #include <asm/types.h>
#include <asm/types.h>
#endif
#ifndef _ITIPLPARMSREAL_H
#define _ITIPLPARMSREAL_H
struct ItIplParmsReal struct ItIplParmsReal
{ {
...@@ -75,4 +72,5 @@ struct ItIplParmsReal ...@@ -75,4 +72,5 @@ struct ItIplParmsReal
u64 xRsvd12; // Reserved x30-x37 u64 xRsvd12; // Reserved x30-x37
u64 xRsvd13; // Reserved x38-x3F u64 xRsvd13; // Reserved x38-x3F
}; };
#endif // _ITIPLPARMSREAL_H
#endif /* _ITIPLPARMSREAL_H */
...@@ -16,6 +16,8 @@ ...@@ -16,6 +16,8 @@
* along with this program; if not, write to the Free Software * along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/ */
#ifndef _ITLPNACA_H
#define _ITLPNACA_H
//============================================================================= //=============================================================================
// //
...@@ -24,10 +26,6 @@ ...@@ -24,10 +26,6 @@
// //
//============================================================================= //=============================================================================
#ifndef _ITLPNACA_H
#define _ITLPNACA_H
struct ItLpNaca struct ItLpNaca
{ {
//============================================================================= //=============================================================================
...@@ -87,4 +85,4 @@ struct ItLpNaca ...@@ -87,4 +85,4 @@ struct ItLpNaca
//============================================================================= //=============================================================================
#endif // _ITLPNACA_H #endif /* _ITLPNACA_H */
...@@ -16,6 +16,8 @@ ...@@ -16,6 +16,8 @@
* along with this program; if not, write to the Free Software * along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/ */
#ifndef _ITLPPACA_H
#define _ITLPPACA_H
//============================================================================= //=============================================================================
// //
...@@ -24,13 +26,7 @@ ...@@ -24,13 +26,7 @@
// //
// //
//---------------------------------------------------------------------------- //----------------------------------------------------------------------------
#ifndef _PPC_TYPES_H
#include <asm/types.h> #include <asm/types.h>
#endif
#ifndef _ITLPPACA_H
#define _ITLPPACA_H
struct ItLpPaca struct ItLpPaca
{ {
...@@ -134,4 +130,5 @@ struct ItLpPaca ...@@ -134,4 +130,5 @@ struct ItLpPaca
}; };
#endif // _ITLPPACA_H
#endif /* _ITLPPACA_H */
...@@ -16,6 +16,8 @@ ...@@ -16,6 +16,8 @@
* along with this program; if not, write to the Free Software * along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/ */
#ifndef _ITLPQUEUE_H
#define _ITLPQUEUE_H
//============================================================================= //=============================================================================
// //
...@@ -24,18 +26,11 @@ ...@@ -24,18 +26,11 @@
// events to an LP. // events to an LP.
// //
#ifndef _PPC_TYPES_H
#include <asm/types.h> #include <asm/types.h>
#endif
#include <asm/ptrace.h> #include <asm/ptrace.h>
struct HvLpEvent; struct HvLpEvent;
#ifndef _ITLPQUEUE_H
#define _ITLPQUEUE_H
#define ITMaxLpQueues 8 #define ITMaxLpQueues 8
#define NotUsed 0 // Queue will not be used by PLIC #define NotUsed 0 // Queue will not be used by PLIC
...@@ -94,6 +89,4 @@ static __inline__ void process_iSeries_events( void ) ...@@ -94,6 +89,4 @@ static __inline__ void process_iSeries_events( void )
: : : "r0", "r3" ); : : : "r0", "r3" );
} }
#endif /* _ITLPQUEUE_H */
//=============================================================================
#endif // _ITLPQUEUE_H
...@@ -16,6 +16,8 @@ ...@@ -16,6 +16,8 @@
* along with this program; if not, write to the Free Software * along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/ */
#ifndef _ITLPREGSAVE_H
#define _ITLPREGSAVE_H
//===================================================================================== //=====================================================================================
// //
...@@ -24,9 +26,6 @@ ...@@ -24,9 +26,6 @@
// //
// //
#ifndef _ITLPREGSAVE_H
#define _ITLPREGSAVE_H
struct ItLpRegSave struct ItLpRegSave
{ {
u32 xDesc; // Eye catcher "LpRS" ebcdic 000-003 u32 xDesc; // Eye catcher "LpRS" ebcdic 000-003
...@@ -84,4 +83,5 @@ struct ItLpRegSave ...@@ -84,4 +83,5 @@ struct ItLpRegSave
u8 xRsvd3[176]; // Reserved 350-3FF u8 xRsvd3[176]; // Reserved 350-3FF
}; };
#endif // _ITLPREGSAVE_H
#endif /* _ITLPREGSAVE_H */
...@@ -16,6 +16,8 @@ ...@@ -16,6 +16,8 @@
* along with this program; if not, write to the Free Software * along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/ */
#ifndef _ITVPDAREAS_H
#define _ITVPDAREAS_H
//===================================================================================== //=====================================================================================
// //
...@@ -23,13 +25,7 @@ ...@@ -23,13 +25,7 @@
// the OS from PLIC (most of which start from the SP). // the OS from PLIC (most of which start from the SP).
// //
#ifndef _PPC_TYPES_H #include <asm/types.h>
#include <asm/types.h>
#endif
#ifndef _ITVPDAREAS_H
#define _ITVPDAREAS_H
// VPD Entry index is carved in stone - cannot be changed (easily). // VPD Entry index is carved in stone - cannot be changed (easily).
#define ItVpdCecVpd 0 #define ItVpdCecVpd 0
...@@ -97,4 +93,4 @@ struct ItVpdAreas ...@@ -97,4 +93,4 @@ struct ItVpdAreas
void * xSlicVpdAdrs[ItVpdMaxEntries];// Array of VPD buffers 130-1EF void * xSlicVpdAdrs[ItVpdMaxEntries];// Array of VPD buffers 130-1EF
}; };
#endif // _ITVPDAREAS_H #endif /* _ITVPDAREAS_H */
...@@ -16,14 +16,11 @@ ...@@ -16,14 +16,11 @@
* along with this program; if not, write to the Free Software * along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/ */
#ifndef _PPC_TYPES_H
#include <asm/types.h>
#endif
#ifndef _LPARMAP_H #ifndef _LPARMAP_H
#define _LPARMAP_H #define _LPARMAP_H
#include <asm/types.h>
/* The iSeries hypervisor will set up mapping for one or more /* The iSeries hypervisor will set up mapping for one or more
* ESID/VSID pairs (in SLB/segment registers) and will set up * ESID/VSID pairs (in SLB/segment registers) and will set up
* mappings of one or more ranges of pages to VAs. * mappings of one or more ranges of pages to VAs.
......
...@@ -21,9 +21,7 @@ ...@@ -21,9 +21,7 @@
#define _ISERIES_DMA_H #define _ISERIES_DMA_H
#include <asm/types.h> #include <asm/types.h>
#ifndef __LINUX_SPINLOCK_H
#include <linux/spinlock.h> #include <linux/spinlock.h>
#endif
// NUM_TCE_LEVELS defines the largest contiguous block // NUM_TCE_LEVELS defines the largest contiguous block
// of dma (tce) space we can get. NUM_TCE_LEVELS = 10 // of dma (tce) space we can get. NUM_TCE_LEVELS = 10
...@@ -94,4 +92,4 @@ extern void create_virtual_bus_tce_table( void ); ...@@ -94,4 +92,4 @@ extern void create_virtual_bus_tce_table( void );
extern void create_pci_bus_tce_table( unsigned busNumber ); extern void create_pci_bus_tce_table( unsigned busNumber );
#endif // _ISERIES_DMA_H #endif /* _ISERIES_DMA_H */
#ifndef _ISERIES_IO_H
#define _ISERIES_IO_H
#include <linux/config.h> #include <linux/config.h>
#ifdef CONFIG_PPC_ISERIES #ifdef CONFIG_PPC_ISERIES
#ifndef _ISERIES_IO_H
#define _ISERIES_IO_H
#include <linux/types.h> #include <linux/types.h>
/************************************************************************/ /************************************************************************/
/* File iSeries_io.h created by Allan Trautman on Thu Dec 28 2000. */ /* File iSeries_io.h created by Allan Trautman on Thu Dec 28 2000. */
...@@ -41,6 +42,5 @@ extern void* iSeries_memset_io(void *dest, char x, size_t n); ...@@ -41,6 +42,5 @@ extern void* iSeries_memset_io(void *dest, char x, size_t n);
extern void* iSeries_memcpy_toio(void *dest, void *source, size_t n); extern void* iSeries_memcpy_toio(void *dest, void *source, size_t n);
extern void* iSeries_memcpy_fromio(void *dest, void *source, size_t n); extern void* iSeries_memcpy_fromio(void *dest, void *source, size_t n);
#endif /* _ISERIES_IO_H */ #endif /* CONFIG_PPC_ISERIES */
#endif /* CONFIG_PPC_ISERIES */ #endif /* _ISERIES_IO_H */
#ifndef __ISERIES_IRQ_H__ #ifndef __ISERIES_IRQ_H__
#define __ISERIES_IRQ_H__ #define __ISERIES_IRQ_H__
#ifdef __cplusplus #ifdef __cplusplus
extern "C" { extern "C" {
#endif #endif
unsigned int iSeries_startup_IRQ(unsigned int);
void iSeries_shutdown_IRQ(unsigned int);
void iSeries_enable_IRQ(unsigned int);
void iSeries_disable_IRQ(unsigned int);
void iSeries_end_IRQ(unsigned int);
void iSeries_init_IRQ(void); void iSeries_init_IRQ(void);
void iSeries_init_irqMap(int);
int iSeries_allocate_IRQ(HvBusNumber, HvSubBusNumber, HvAgentId); int iSeries_allocate_IRQ(HvBusNumber, HvSubBusNumber, HvAgentId);
int iSeries_assign_IRQ(int, HvBusNumber, HvSubBusNumber, HvAgentId); int iSeries_assign_IRQ(int, HvBusNumber, HvSubBusNumber, HvAgentId);
void iSeries_activate_IRQs(void); void iSeries_activate_IRQs(void);
......
/* -*- linux-c -*-
* drivers/char/vio.h
*
* iSeries Virtual I/O Message Path header
*
* Authors: Dave Boutcher <boutcher@us.ibm.com>
* Ryan Arnold <ryanarn@us.ibm.com>
* Colin Devilbiss <devilbis@us.ibm.com>
*
* (C) Copyright 2000 IBM Corporation
*
* This header file is used by the iSeries virtual I/O device
* drivers. It defines the interfaces to the common functions
 * (implemented in drivers/char/viopath.c) as well as defining
* common functions and structures. Currently (at the time I
* wrote this comment) the iSeries virtual I/O device drivers
* that use this are
* drivers/block/viodasd.c
* drivers/char/viocons.c
* drivers/char/viotape.c
* drivers/cdrom/viocd.c
*
* The iSeries virtual ethernet support (veth.c) uses a whole
* different set of functions.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#ifndef _VIO_H
#define _VIO_H
#include <asm/iSeries/HvTypes.h>
#include <asm/iSeries/HvLpEvent.h>
/* iSeries virtual I/O events use the subtype field in
* HvLpEvent to figure out what kind of vio event is coming
* in. We use a table to route these, and this defines
* the maximum number of distinct subtypes
*/
#define VIO_MAX_SUBTYPES 7
/* Each subtype can register a handler to process its events.
* The handler must have this interface.
*/
typedef void (vio_event_handler_t) (struct HvLpEvent * event);
int viopath_open(HvLpIndex remoteLp, int subtype, int numReq);
int viopath_close(HvLpIndex remoteLp, int subtype, int numReq);
int vio_setHandler(int subtype, vio_event_handler_t * beh);
int vio_clearHandler(int subtype);
int viopath_isactive(HvLpIndex lp);
HvLpInstanceId viopath_sourceinst(HvLpIndex lp);
HvLpInstanceId viopath_targetinst(HvLpIndex lp);
void vio_set_hostlp(void);
void *vio_get_event_buffer(int subtype);
void vio_free_event_buffer(int subtype, void *buffer);
extern HvLpIndex viopath_hostLp;
extern HvLpIndex viopath_ourLp;
#define VIO_MESSAGE "iSeries virtual I/O: "
#define KERN_DEBUG_VIO KERN_DEBUG VIO_MESSAGE
#define KERN_INFO_VIO KERN_INFO VIO_MESSAGE
#define KERN_WARNING_VIO KERN_WARNING VIO_MESSAGE
#define VIOCHAR_MAX_DATA 200
#define VIOMAJOR_SUBTYPE_MASK 0xff00
#define VIOMINOR_SUBTYPE_MASK 0x00ff
#define VIOMAJOR_SUBTYPE_SHIFT 8
#define VIOVERSION 0x0101
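/*
 * A minimal sketch of how an event's subtype decomposes with the masks
 * above: the major part selects the handler slot, the minor part is
 * interpreted by the individual driver.  The helper name is illustrative
 * only.
 */
static inline int vio_example_major_subtype(struct HvLpEvent *event)
{
	return (event->xSubtype & VIOMAJOR_SUBTYPE_MASK)
		>> VIOMAJOR_SUBTYPE_SHIFT;
}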
/*
 * This is the general structure for VIO errors; each module should have
 * a table of them, and each table should be terminated by an entry of
 * { 0, 0, NULL }.  Then, to find a specific error message, a module
 * should pass its local table and the return code.
 */
struct vio_error_entry {
u16 rc;
int errno;
const char *msg;
};
const struct vio_error_entry *vio_lookup_rc(const struct vio_error_entry
*local_table, u16 rc);
enum viosubtypes {
viomajorsubtype_monitor = 0x0100,
viomajorsubtype_blockio = 0x0200,
viomajorsubtype_chario = 0x0300,
viomajorsubtype_config = 0x0400,
viomajorsubtype_cdio = 0x0500,
viomajorsubtype_tape = 0x0600
};
enum vioconfigsubtype {
vioconfigget = 0x0001,
};
enum viorc {
viorc_good = 0x0000,
viorc_noConnection = 0x0001,
viorc_noReceiver = 0x0002,
viorc_noBufferAvailable = 0x0003,
viorc_invalidMessageType = 0x0004,
viorc_invalidRange = 0x0201,
viorc_invalidToken = 0x0202,
viorc_DMAError = 0x0203,
viorc_useError = 0x0204,
viorc_releaseError = 0x0205,
viorc_invalidDisk = 0x0206,
viorc_openRejected = 0x0301
};
#endif /* _VIO_H */