Commit 2c24cc13 authored by Andrew Morton's avatar Andrew Morton Committed by Linus Torvalds

[PATCH] ppc64: preliminary iseries support, from Paul Mackerras

From: Anton Blanchard <anton@samba.org>

Preliminary iSeries support.  Still a bit hackish in parts but it does
compile.  The viodasd driver is almost completely untested so don't trust it
with your data.
parent 228ab0bd
......@@ -10,21 +10,26 @@ obj-y := setup.o entry.o traps.o irq.o idle.o \
align.o semaphore.o bitops.o stab.o htab.o pacaData.o \
udbg.o binfmt_elf32.o sys_ppc32.o ioctl32.o \
ptrace32.o signal32.o pmc.o rtc.o init_task.o \
lmb.o pci.o pci_dn.o pci_dma.o cputable.o
lmb.o cputable.o
obj-$(CONFIG_PPC_ISERIES) += iSeries_pci.o iSeries_pci_reset.o \
iSeries_IoMmTable.o iSeries_irq.o \
iSeries_VpdInfo.o XmPciLpEvent.o \
obj-$(CONFIG_PCI) += pci.o pci_dn.o pci_dma.o
ifdef CONFIG_PPC_ISERIES
obj-$(CONFIG_PCI) += iSeries_pci.o iSeries_pci_reset.o \
iSeries_IoMmTable.o
endif
obj-$(CONFIG_PPC_ISERIES) += iSeries_irq.o \
iSeries_VpdInfo.o XmPciLpEvent.o \
HvCall.o HvLpConfig.o LparData.o mf_proc.o \
iSeries_setup.o ItLpQueue.o hvCall.o \
mf.o HvLpEvent.o iSeries_proc.o
mf.o HvLpEvent.o iSeries_proc.o iSeries_htab.o \
proc_pmc.o
obj-$(CONFIG_PPC_PSERIES) += pSeries_pci.o pSeries_lpar.o pSeries_hvCall.o \
eeh.o nvram.o rtasd.o ras.o
# Change this to pSeries only once we've got iSeries up to date
obj-y += open_pic.o xics.o pSeries_htab.o rtas.o \
chrp_setup.o i8259.o prom.o
eeh.o nvram.o rtasd.o ras.o \
open_pic.o xics.o pSeries_htab.o rtas.o \
chrp_setup.o i8259.o prom.o
obj-$(CONFIG_PROC_FS) += proc_ppc64.o
obj-$(CONFIG_RTAS_FLASH) += rtas_flash.o
......@@ -32,5 +37,6 @@ obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_MODULES) += module.o ppc_ksyms.o
obj-$(CONFIG_PPC_RTAS) += rtas-proc.o
obj-$(CONFIG_SCANLOG) += scanlog.o
obj-$(CONFIG_VIOPATH) += viopath.o
CFLAGS_ioctl32.o += -Ifs/
......@@ -65,8 +65,6 @@
#include <asm/ppcdebug.h>
#include <asm/cputable.h>
extern volatile unsigned char *chrp_int_ack_special;
void chrp_progress(char *, unsigned short);
extern void openpic_init_IRQ(void);
......
......@@ -275,15 +275,6 @@ _GLOBAL(_switch)
addi r6,r4,-THREAD /* Convert THREAD to 'current' */
std r6,PACACURRENT(r13) /* Set new 'current' */
#ifdef CONFIG_PPC_ISERIES
#error fixme
ld r7,TI_FLAGS(r4) /* Get run light flag */
mfspr r9,CTRLF
srdi r7,r7,1 /* Align to run light bit in CTRL reg */
insrdi r9,r7,1,63 /* Insert run light into CTRL */
mtspr CTRLT,r9
#endif
ld r1,KSP(r4) /* Load new stack pointer */
ld r6,_CCR(r1)
mtcrf 0xFF,r6
......@@ -291,6 +282,15 @@ _GLOBAL(_switch)
REST_8GPRS(14, r1)
REST_10GPRS(22, r1)
#ifdef CONFIG_PPC_ISERIES
clrrdi r7,r1,THREAD_SHIFT /* get current_thread_info() */
ld r7,TI_FLAGS(r7) /* Get run light flag */
mfspr r9,CTRLF
srdi r7,r7,TIF_RUN_LIGHT
insrdi r9,r7,1,63 /* Insert run light into CTRL */
mtspr CTRLT,r9
#endif
/* convert old thread to its task_struct for return value */
addi r3,r3,-THREAD
ld r7,_NIP(r1) /* Return to _switch caller in new task */
......@@ -308,39 +308,16 @@ _GLOBAL(ret_from_fork)
b .ret_from_except
_GLOBAL(ret_from_except)
#ifdef CONFIG_PPC_ISERIES
ld r5,SOFTE(r1)
cmpdi 0,r5,0
beq 4f
irq_recheck:
/* Check for pending interrupts (iSeries) */
CHECKANYINT(r3,r4)
beq+ 4f /* skip do_IRQ if no interrupts */
#warning FIX ISERIES
mfspr r5,SPRG3
li r3,0
stb r3,PACAPROCENABLED(r5) /* ensure we are disabled */
addi r3,r1,STACK_FRAME_OVERHEAD
bl .do_IRQ
b irq_recheck /* loop back and handle more */
4:
#endif
/*
* Disable interrupts so that current_thread_info()->flags
* can't change between when we test it and when we return
* from the interrupt.
*/
recheck:
mfmsr r10 /* Get current interrupt state */
li r4,0
ori r4,r4,MSR_EE
andc r10,r10,r4 /* clear MSR_EE */
mtmsrd r10,1 /* Update machine state */
#ifdef CONFIG_PPC_ISERIES
#error fix iSeries soft disable
#endif
andc r9,r10,r4 /* clear MSR_EE */
mtmsrd r9,1 /* Update machine state */
ld r3,_MSR(r1) /* Returning to user mode? */
andi. r3,r3,MSR_PR
......@@ -364,6 +341,28 @@ recheck:
REST_GPR(13,r1)
restore:
#ifdef CONFIG_PPC_ISERIES
ld r5,SOFTE(r1)
mfspr r4,SPRG3 /* get paca address */
cmpdi 0,r5,0
beq 4f
/* Check for pending interrupts (iSeries) */
/* this is CHECKANYINT except that we already have the paca address */
ld r3,PACALPPACA+LPPACAANYINT(r4)
cmpdi r3,0
beq+ 4f /* skip do_IRQ if no interrupts */
mfspr r13,SPRG3 /* get paca pointer back */
li r3,0
stb r3,PACAPROCENABLED(r13) /* ensure we are soft-disabled */
mtmsrd r10 /* hard-enable again */
addi r3,r1,STACK_FRAME_OVERHEAD
bl .do_IRQ
b .ret_from_except /* loop back and handle more */
4: stb r5,PACAPROCENABLED(r4)
#endif
ld r3,_CTR(r1)
ld r0,_LINK(r1)
mtctr r3
......@@ -377,12 +376,6 @@ restore:
stdcx. r0,0,r1 /* to clear the reservation */
#ifdef DO_SOFT_DISABLE
/* XXX do this in do_work, r13 isnt valid here */
ld r0,SOFTE(r1)
stb r0,PACAPROCENABLED(r13)
#endif
mfmsr r0
li r2, MSR_RI
andc r0,r0,r2
......@@ -407,21 +400,21 @@ restore:
/* Note: this must change if we start using the TIF_NOTIFY_RESUME bit */
do_work:
/* Enable interrupts */
ori r10,r10,MSR_EE
mtmsrd r10,1
andi. r0,r3,_TIF_NEED_RESCHED
beq 1f
bl .schedule
b recheck
b .ret_from_except
1: andi. r0,r3,_TIF_SIGPENDING
beq recheck
beq .ret_from_except
li r3,0
addi r4,r1,STACK_FRAME_OVERHEAD
bl .do_signal
b recheck
b .ret_from_except
#ifdef CONFIG_PPC_PSERIES
/*
* On CHRP, the Run-Time Abstraction Services (RTAS) have to be
* called with the MMU off.
......@@ -632,3 +625,4 @@ _GLOBAL(enter_prom)
mtlr r0
blr /* return to caller */
#endif /* defined(CONFIG_PPC_PSERIES) */
......@@ -91,20 +91,26 @@
.text
.globl _stext
_stext:
#ifdef CONFIG_PPC_PSERIES
_STATIC(__start)
b .__start_initialization_pSeries
#endif
#ifdef CONFIG_PPC_ISERIES
/* At offset 0x20, there is a pointer to iSeries LPAR data.
* This is required by the hypervisor */
/*
* At offset 0x20, there is a pointer to iSeries LPAR data.
* This is required by the hypervisor
*/
. = 0x20
.llong hvReleaseData-KERNELBASE
/* At offset 0x28 and 0x30 are offsets to the msChunks
/*
* At offset 0x28 and 0x30 are offsets to the msChunks
* array (used by the iSeries LPAR debugger to do translation
* between physical addresses and absolute addresses) and
* to the pidhash table (also used by the debugger) */
* to the pidhash table (also used by the debugger)
*/
.llong msChunks-KERNELBASE
.llong pidhash-KERNELBASE
.llong 0 /* pidhash-KERNELBASE SFRXXX */
/* Offset 0x38 - Pointer to start of embedded System.map */
.globl embedded_sysmap_start
......@@ -114,7 +120,7 @@ embedded_sysmap_start:
.globl embedded_sysmap_end
embedded_sysmap_end:
.llong 0
#endif
#else
/* Secondary processors spin on this value until it goes to 1. */
.globl __secondary_hold_spinloop
......@@ -164,6 +170,7 @@ _GLOBAL(__secondary_hold)
BUG_OPCODE
#endif
#endif
#endif
/*
* The following macros define the code that appears as
......@@ -245,6 +252,14 @@ _GLOBAL(__secondary_hold)
std r22,EX_SRR0(r21); /* save SRR0 in exc. frame */ \
ld r23,LPPACA+LPPACASRR1(r20); /* Get SRR1 from ItLpPaca */ \
std r23,EX_SRR1(r21); /* save SRR1 in exc. frame */ \
\
mfspr r23,DAR; /* Save DAR in exc. frame */ \
std r23,EX_DAR(r21); \
mfspr r23,DSISR; /* Save DSISR in exc. frame */ \
stw r23,EX_DSISR(r21); \
mfspr r23,SPRG2; /* Save r20 in exc. frame */ \
std r23,EX_R20(r21); \
\
mfcr r23; /* save CR in r23 */
/*
......@@ -1114,7 +1129,6 @@ _GLOBAL(save_remaining_regs)
SET_REG_TO_CONST(r22, MSR_KERNEL)
#ifdef DO_SOFT_DISABLE
#warning FIX ISERIES
stb r20,PACAPROCENABLED(r13) /* possibly soft enable */
ori r22,r22,MSR_EE /* always hard enable */
#else
......@@ -1220,6 +1234,7 @@ _GLOBAL(__start_initialization_iSeries)
b .start_here_common
#endif
#ifdef CONFIG_PPC_PSERIES
_GLOBAL(__start_initialization_pSeries)
mr r31,r3 /* save parameters */
mr r30,r4
......@@ -1329,6 +1344,7 @@ _STATIC(__after_prom_start)
sub r5,r5,r27
bl .copy_and_flush /* copy the rest */
b .start_here_pSeries
#endif
/*
* Copy routine used to copy the kernel to start at physical address 0
......@@ -1595,6 +1611,7 @@ _GLOBAL(enable_32b_mode)
isync
blr
#ifdef CONFIG_PPC_PSERIES
/*
* This is where the main kernel code starts.
*/
......@@ -1730,6 +1747,7 @@ _STATIC(start_here_pSeries)
mtspr SRR0,r3
mtspr SRR1,r4
rfid
#endif /* CONFIG_PPC_PSERIES */
/* This is where all platforms converge execution */
_STATIC(start_here_common)
......@@ -1804,10 +1822,8 @@ _STATIC(start_here_common)
/* Load up the kernel context */
5:
#ifdef DO_SOFT_DISABLE
#warning FIX ISERIES
mfspr r4,SPRG3
li r5,0
stb r5,PACAPROCENABLED(r4) /* Soft Disabled */
stb r5,PACAPROCENABLED(r13) /* Soft Disabled */
mfmsr r5
ori r5,r5,MSR_EE /* Hard Enabled */
mtmsrd r5
......
......@@ -75,6 +75,7 @@ loop_forever(void)
;
}
#ifdef CONFIG_PPC_PSERIES
static inline void
create_pte_mapping(unsigned long start, unsigned long end,
unsigned long mode, int large)
......@@ -181,6 +182,7 @@ htab_initialize(void)
}
#undef KB
#undef MB
#endif
/*
* find_linux_pte returns the address of a linux pte for a given
......
#define PCIFR(...)
/************************************************************************/
/* This module supports the iSeries I/O Address translation mapping */
/* Copyright (C) 20yy <Allan H Trautman> <IBM Corp> */
......
/*
* iSeries hashtable management.
* Derived from pSeries_htab.c
*
* SMP scalability work:
* Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <asm/machdep.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/iSeries/HvCallHpt.h>
#include <asm/abs_addr.h>
#if 0
#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/cputable.h>
#endif
/*
 * Insert an HPTE mapping va -> prpn via the iSeries hypervisor.
 *
 * Returns an encoded slot number ((secondary << 3) | slot-within-group)
 * on success, or -1 if no free slot is available in either hash group.
 *
 * NOTE(review): hpte_group and large are unused here — slot selection is
 * delegated to HvCallHpt_findValid; presumably the hypervisor derives the
 * groups from the VPN itself — confirm against the HvCallHpt interface.
 */
static long iSeries_hpte_insert(unsigned long hpte_group, unsigned long va,
				unsigned long prpn, int secondary,
				unsigned long hpteflags, int bolted, int large)
{
	long slot;
	HPTE lhpte;

	/*
	 * The hypervisor tries both primary and secondary.
	 * If we are being called to insert in the secondary,
	 * it means we have already tried both primary and secondary,
	 * so we return failure immediately.
	 */
	if (secondary)
		return -1;

	/* Ask the hypervisor for a free slot for this virtual page */
	slot = HvCallHpt_findValid(&lhpte, va >> PAGE_SHIFT);
	if (lhpte.dw0.dw0.v)
		panic("select_hpte_slot found entry already valid\n");

	if (slot == -1)		/* No available entry found in either group */
		return -1;

	if (slot < 0) {		/* MSB set means secondary group */
		secondary = 1;
		slot &= 0x7fffffffffffffff;
	}

	/* dword1: real page number and protection/attribute flags */
	lhpte.dw1.dword1 = 0;
	lhpte.dw1.dw1.rpn = physRpn_to_absRpn(prpn);
	lhpte.dw1.flags.flags = hpteflags;

	/* dword0: abbreviated VPN plus control bits, valid last */
	lhpte.dw0.dword0 = 0;
	lhpte.dw0.dw0.avpn = va >> 23;
	lhpte.dw0.dw0.h = secondary;
	lhpte.dw0.dw0.bolted = bolted;
	lhpte.dw0.dw0.v = 1;

	/* Now fill in the actual HPTE */
	HvCallHpt_addValidate(slot, secondary, &lhpte);

	/* Encode: bit 3 = which group, bits 0-2 = slot within group */
	return (secondary << 3) | (slot & 7);
}
/* Fetch doubleword 0 (AVPN + control bits) of the HPTE at @slot. */
static unsigned long iSeries_hpte_getword0(unsigned long slot)
{
	HPTE hpte;

	HvCallHpt_get(&hpte, slot);
	return hpte.dw0.dword0;
}
/*
 * Evict one non-bolted HPTE from the group starting at @hpte_group.
 * Returns the number of probes made before a victim was found,
 * or -1 if every slot in the group is bolted.
 */
static long iSeries_hpte_remove(unsigned long hpte_group)
{
	unsigned long offset;
	HPTE victim;
	int probe;

	/* Start at a pseudo-random slot so evictions spread over the group */
	offset = mftb() & 0x7;

	for (probe = 0; probe < HPTES_PER_GROUP; probe++) {
		victim.dw0.dword0 =
			iSeries_hpte_getword0(hpte_group + offset);
		if (!victim.dw0.dw0.bolted) {
			HvCallHpt_invalidateSetSwBitsGet(hpte_group + offset,
							 0, 0);
			return probe;
		}
		offset = (offset + 1) & 0x7;
	}

	return -1;
}
/*
 * Update the protection bits of the HPTE at @slot, provided it is still
 * valid and still maps @va. Returns 0 on success, -1 if the slot no
 * longer matches. (@large and @local are unused on iSeries.)
 */
static long iSeries_hpte_updatepp(unsigned long slot, unsigned long newpp,
				  unsigned long va, int large, int local)
{
	unsigned long want_avpn = va >> 23;
	HPTE hpte;

	HvCallHpt_get(&hpte, slot);
	if ((hpte.dw0.dw0.avpn != want_avpn) || !hpte.dw0.dw0.v)
		return -1;	/* entry was replaced under us */

	HvCallHpt_setPp(slot, newpp);
	return 0;
}
/*
 * Functions used to find the PTE for a particular virtual address.
 * Only used during boot when bolting pages.
 *
 * Input : vpn : virtual page number
 * Output: PTE index within the page table of the entry
 *         -1 on failure
 */
static long iSeries_hpte_find(unsigned long vpn)
{
	HPTE hpte;
	long slot;

	/*
	 * The HvCallHpt_findValid interface is as follows:
	 * 0xffffffffffffffff : No entry found.
	 * 0x00000000xxxxxxxx : Entry found in primary group, slot x
	 * 0x80000000xxxxxxxx : Entry found in secondary group, slot x
	 */
	slot = HvCallHpt_findValid(&hpte, vpn);
	if (hpte.dw0.dw0.v) {
		if (slot < 0) {
			/* Secondary group: strip the MSB flag ... */
			slot &= 0x7fffffffffffffff;
			/*
			 * ... then negate, so a secondary-group hit is
			 * reported as a negative index.
			 * NOTE(review): the sole caller only checks for -1
			 * and passes the raw value to HvCallHpt_setPp —
			 * confirm the hypervisor accepts this encoding.
			 */
			slot = -slot;
		}
	} else
		slot = -1;	/* not valid -> not found */
	return slot;
}
/*
 * Update the page protection bits. Intended to be used to create
 * guard pages for kernel data structures on pages which are bolted
 * in the HPT. Assumes pages being operated on will not be stolen.
 * Does not work on large pages.
 *
 * No need to lock here because we should be the only user.
 */
static void iSeries_hpte_updateboltedpp(unsigned long newpp, unsigned long ea)
{
	unsigned long kernel_vsid, virt_addr, virt_pfn;
	long slot;

	/* Build the full virtual address for this kernel EA, then its VPN */
	kernel_vsid = get_kernel_vsid(ea);
	virt_addr = (kernel_vsid << 28) | (ea & 0x0fffffff);
	virt_pfn = virt_addr >> PAGE_SHIFT;

	slot = iSeries_hpte_find(virt_pfn);
	if (slot == -1)
		panic("updateboltedpp: Could not find page to bolt\n");
	HvCallHpt_setPp(slot, newpp);
}
/*
 * Invalidate the HPTE at @slot if it is still valid and still maps @va;
 * otherwise do nothing. (@large and @local are unused on iSeries.)
 */
static void iSeries_hpte_invalidate(unsigned long slot, unsigned long va,
				    int large, int local)
{
	unsigned long want_avpn = va >> 23;
	HPTE entry;

	entry.dw0.dword0 = iSeries_hpte_getword0(slot);
	if (entry.dw0.dw0.v && (entry.dw0.dw0.avpn == want_avpn))
		HvCallHpt_invalidateSetSwBitsGet(slot, 0, 0);
}
/*
 * Hook the iSeries hashtable primitives into the machine-dependent
 * operations vector.
 */
void hpte_init_iSeries(void)
{
	ppc_md.hpte_insert          = iSeries_hpte_insert;
	ppc_md.hpte_remove          = iSeries_hpte_remove;
	ppc_md.hpte_updatepp        = iSeries_hpte_updatepp;
	ppc_md.hpte_updateboltedpp  = iSeries_hpte_updateboltedpp;
	ppc_md.hpte_invalidate      = iSeries_hpte_invalidate;
}
......@@ -40,19 +40,21 @@
#include <asm/iSeries/iSeries_irq.h>
#include <asm/iSeries/XmPciLpEvent.h>
static unsigned int iSeries_startup_IRQ(unsigned int irq);
static void iSeries_shutdown_IRQ(unsigned int irq);
static void iSeries_enable_IRQ(unsigned int irq);
static void iSeries_disable_IRQ(unsigned int irq);
static void iSeries_end_IRQ(unsigned int irq);
hw_irq_controller iSeries_IRQ_handler = {
"iSeries irq controller",
iSeries_startup_IRQ, /* startup */
iSeries_shutdown_IRQ, /* shutdown */
iSeries_enable_IRQ, /* enable */
iSeries_disable_IRQ, /* disable */
NULL, /* ack */
iSeries_end_IRQ, /* end */
NULL /* set_affinity */
static hw_irq_controller iSeries_IRQ_handler = {
.typename = "iSeries irq controller",
.startup = iSeries_startup_IRQ,
.shutdown = iSeries_shutdown_IRQ,
.enable = iSeries_enable_IRQ,
.disable = iSeries_disable_IRQ,
.end = iSeries_end_IRQ
};
struct iSeries_irqEntry {
u32 dsa;
struct iSeries_irqEntry* next;
......@@ -65,73 +67,97 @@ struct iSeries_irqAnchor {
struct iSeries_irqEntry* head;
};
struct iSeries_irqAnchor iSeries_irqMap[NR_IRQS];
static struct iSeries_irqAnchor iSeries_irqMap[NR_IRQS];
#if 0
static void iSeries_init_irqMap(int irq);
#endif
void iSeries_init_irqMap(int irq);
void iSeries_init_irq_desc(irq_desc_t *desc)
{
desc->handler = &iSeries_IRQ_handler;
}
/* This is called by init_IRQ. set in ppc_md.init_IRQ by iSeries_setup.c */
/* This is called by init_IRQ. set in ppc_md.init_IRQ by iSeries_setup.c */
void __init iSeries_init_IRQ(void)
{
#if 0
int i;
irq_desc_t *desc;
for (i = 0; i < NR_IRQS; i++) {
irq_desc[i].handler = &iSeries_IRQ_handler;
irq_desc[i].status = 0;
irq_desc[i].status |= IRQ_DISABLED;
irq_desc[i].depth = 1;
desc = get_irq_desc(i);
desc->handler = &iSeries_IRQ_handler;
desc->status = 0;
desc->status |= IRQ_DISABLED;
desc->depth = 1;
iSeries_init_irqMap(i);
}
#endif
/* Register PCI event handler and open an event path */
PPCDBG(PPCDBG_BUSWALK,"Register PCI event handler and open an event path\n");
PPCDBG(PPCDBG_BUSWALK,
"Register PCI event handler and open an event path\n");
XmPciLpEvent_init();
return;
}
/**********************************************************************
#if 0
/*
* Called by iSeries_init_IRQ
* Prevent IRQs 0 and 255 from being used. IRQ 0 appears in
* uninitialized devices. IRQ 255 appears in the PCI interrupt
line register if a PCI error occurs.
*********************************************************************/
void __init iSeries_init_irqMap(int irq)
*/
static void __init iSeries_init_irqMap(int irq)
{
iSeries_irqMap[irq].valid = (irq == 0 || irq == 255)? 0 : 1;
iSeries_irqMap[irq].valid = ((irq == 0) || (irq == 255)) ? 0 : 1;
iSeries_irqMap[irq].entryCount = 0;
iSeries_irqMap[irq].head = NULL;
}
#endif
/* This is called out of iSeries_scan_slot to allocate an IRQ for an EADS slot */
/* It calculates the irq value for the slot. */
int __init iSeries_allocate_IRQ(HvBusNumber busNumber, HvSubBusNumber subBusNumber, HvAgentId deviceId)
/*
* This is called out of iSeries_scan_slot to allocate an IRQ for an EADS slot
* It calculates the irq value for the slot.
*/
int __init iSeries_allocate_IRQ(HvBusNumber busNumber,
HvSubBusNumber subBusNumber, HvAgentId deviceId)
{
u8 idsel = (deviceId >> 4);
u8 function = deviceId & 0x0F;
int irq = ((((busNumber-1)*16 + (idsel-1)*8 + function)*9/8) % 253) + 2;
return irq;
return ((((busNumber - 1) * 16 + (idsel - 1) * 8
+ function) * 9 / 8) % 253) + 2;
}
/* This is called out of iSeries_scan_slot to assign the EADS slot to its IRQ number */
int __init iSeries_assign_IRQ(int irq, HvBusNumber busNumber, HvSubBusNumber subBusNumber, HvAgentId deviceId)
/*
* This is called out of iSeries_scan_slot to assign the EADS slot
* to its IRQ number
*/
int __init iSeries_assign_IRQ(int irq, HvBusNumber busNumber,
HvSubBusNumber subBusNumber, HvAgentId deviceId)
{
int rc;
u32 dsa = (busNumber << 16) | (subBusNumber << 8) | deviceId;
struct iSeries_irqEntry* newEntry;
struct iSeries_irqEntry *newEntry;
unsigned long flags;
irq_desc_t *desc = get_irq_desc(irq);
if (irq < 0 || irq >= NR_IRQS) {
if ((irq < 0) || (irq >= NR_IRQS))
return -1;
}
newEntry = kmalloc(sizeof(*newEntry), GFP_KERNEL);
if (newEntry == NULL) {
if (newEntry == NULL)
return -ENOMEM;
}
newEntry->dsa = dsa;
newEntry->next = NULL;
/********************************************************************
* Probably not necessary to lock the irq since allocation is only
* done during buswalk, but it should not hurt anything except a
* little performance to be smp safe.
*******************************************************************/
spin_lock_irqsave(&irq_desc[irq].lock, flags);
/*
* Probably not necessary to lock the irq since allocation is only
* done during buswalk, but it should not hurt anything except a
* little performance to be smp safe.
*/
spin_lock_irqsave(&desc->lock, flags);
if (iSeries_irqMap[irq].valid) {
/* Push the new element onto the irq stack */
......@@ -139,26 +165,28 @@ int __init iSeries_assign_IRQ(int irq, HvBusNumber busNumber, HvSubBusNumber sub
iSeries_irqMap[irq].head = newEntry;
++iSeries_irqMap[irq].entryCount;
rc = 0;
PPCDBG(PPCDBG_BUSWALK,"iSeries_assign_IRQ 0x%04X.%02X.%02X = 0x%04X\n",busNumber, subBusNumber, deviceId, irq);
}
else {
printk("PCI: Something is wrong with the iSeries_irqMap. \n");
PPCDBG(PPCDBG_BUSWALK, "iSeries_assign_IRQ 0x%04X.%02X.%02X = 0x%04X\n",
busNumber, subBusNumber, deviceId, irq);
} else {
printk("PCI: Something is wrong with the iSeries_irqMap.\n");
kfree(newEntry);
rc = -1;
}
spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
}
spin_unlock_irqrestore(&desc->lock, flags);
return rc;
}
/* This is called by iSeries_activate_IRQs */
unsigned int iSeries_startup_IRQ(unsigned int irq)
static unsigned int iSeries_startup_IRQ(unsigned int irq)
{
struct iSeries_irqEntry* entry;
struct iSeries_irqEntry *entry;
u32 bus, subBus, deviceId, function, mask;
for(entry=iSeries_irqMap[irq].head; entry!=NULL; entry=entry->next) {
bus = (entry->dsa >> 16) & 0xFFFF;
subBus = (entry->dsa >> 8) & 0xFF;
for (entry = iSeries_irqMap[irq].head; entry != NULL;
entry = entry->next) {
bus = (entry->dsa >> 16) & 0xFFFF;
subBus = (entry->dsa >> 8) & 0xFF;
deviceId = entry->dsa & 0xFF;
function = deviceId & 0x0F;
/* Link the IRQ number to the bridge */
......@@ -166,32 +194,41 @@ unsigned int iSeries_startup_IRQ(unsigned int irq)
/* Unmask bridge interrupts in the FISR */
mask = 0x01010000 << function;
HvCallPci_unmaskFisr(bus, subBus, deviceId, mask);
PPCDBG(PPCDBG_BUSWALK,"iSeries_activate_IRQ 0x%02X.%02X.%02X Irq:0x%02X\n",bus,subBus,deviceId,irq);
PPCDBG(PPCDBG_BUSWALK, "iSeries_activate_IRQ 0x%02X.%02X.%02X Irq:0x%02X\n",
bus, subBus, deviceId, irq);
}
return 0;
}
/* This is called out of iSeries_fixup to activate interrupt
* generation for usable slots */
/*
* This is called out of iSeries_fixup to activate interrupt
* generation for usable slots
*/
void __init iSeries_activate_IRQs()
{
int irq;
unsigned long flags;
for (irq=0; irq < NR_IRQS; irq++) {
spin_lock_irqsave(&irq_desc[irq].lock, flags);
irq_desc[irq].handler->startup(irq);
spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
for (irq = 0; irq < NR_IRQS; irq++) {
irq_desc_t *desc = get_irq_desc(irq);
if (desc && desc->handler && desc->handler->startup) {
spin_lock_irqsave(&desc->lock, flags);
desc->handler->startup(irq);
spin_unlock_irqrestore(&desc->lock, flags);
}
}
}
/* this is not called anywhere currently */
void iSeries_shutdown_IRQ(unsigned int irq) {
struct iSeries_irqEntry* entry;
static void iSeries_shutdown_IRQ(unsigned int irq)
{
struct iSeries_irqEntry *entry;
u32 bus, subBus, deviceId, function, mask;
/* irq should be locked by the caller */
for (entry=iSeries_irqMap[irq].head; entry; entry=entry->next) {
for (entry = iSeries_irqMap[irq].head; entry; entry = entry->next) {
bus = (entry->dsa >> 16) & 0xFFFF;
subBus = (entry->dsa >> 8) & 0xFF;
deviceId = entry->dsa & 0xFF;
......@@ -202,57 +239,60 @@ void iSeries_shutdown_IRQ(unsigned int irq) {
mask = 0x01010000 << function;
HvCallPci_maskFisr(bus, subBus, deviceId, mask);
}
}
/***********************************************************
/*
* This will be called by device drivers (via disable_IRQ)
* to disable INTA in the bridge interrupt status register.
***********************************************************/
void iSeries_disable_IRQ(unsigned int irq)
*/
static void iSeries_disable_IRQ(unsigned int irq)
{
struct iSeries_irqEntry* entry;
struct iSeries_irqEntry *entry;
u32 bus, subBus, deviceId, mask;
/* The IRQ has already been locked by the caller */
for (entry=iSeries_irqMap[irq].head; entry; entry=entry->next) {
bus = (entry->dsa >> 16) & 0xFFFF;
subBus = (entry->dsa >> 8) & 0xFF;
for (entry = iSeries_irqMap[irq].head; entry; entry = entry->next) {
bus = (entry->dsa >> 16) & 0xFFFF;
subBus = (entry->dsa >> 8) & 0xFF;
deviceId = entry->dsa & 0xFF;
/* Mask secondary INTA */
mask = 0x80000000;
HvCallPci_maskInterrupts(bus, subBus, deviceId, mask);
PPCDBG(PPCDBG_BUSWALK,"iSeries_disable_IRQ 0x%02X.%02X.%02X 0x%04X\n",bus,subBus,deviceId,irq);
PPCDBG(PPCDBG_BUSWALK,
"iSeries_disable_IRQ 0x%02X.%02X.%02X 0x%04X\n",
bus, subBus, deviceId, irq);
}
}
/***********************************************************
/*
* This will be called by device drivers (via enable_IRQ)
* to enable INTA in the bridge interrupt status register.
***********************************************************/
void iSeries_enable_IRQ(unsigned int irq)
*/
static void iSeries_enable_IRQ(unsigned int irq)
{
struct iSeries_irqEntry* entry;
struct iSeries_irqEntry *entry;
u32 bus, subBus, deviceId, mask;
/* The IRQ has already been locked by the caller */
for (entry=iSeries_irqMap[irq].head; entry; entry=entry->next) {
bus = (entry->dsa >> 16) & 0xFFFF;
subBus = (entry->dsa >> 8) & 0xFF;
for (entry = iSeries_irqMap[irq].head; entry; entry = entry->next) {
bus = (entry->dsa >> 16) & 0xFFFF;
subBus = (entry->dsa >> 8) & 0xFF;
deviceId = entry->dsa & 0xFF;
/* Unmask secondary INTA */
mask = 0x80000000;
HvCallPci_unmaskInterrupts(bus, subBus, deviceId, mask);
PPCDBG(PPCDBG_BUSWALK,"iSeries_enable_IRQ 0x%02X.%02X.%02X 0x%04X\n",bus,subBus,deviceId,irq);
PPCDBG(PPCDBG_BUSWALK,
"iSeries_enable_IRQ 0x%02X.%02X.%02X 0x%04X\n",
bus, subBus, deviceId, irq);
}
}
/* Need to define this so ppc_irq_dispatch_handler will NOT call
enable_IRQ at the end of interrupt handling. However, this
does nothing because there is not enough information provided
to do the EOI HvCall. This is done by XmPciLpEvent.c */
void iSeries_end_IRQ(unsigned int irq)
/*
* Need to define this so ppc_irq_dispatch_handler will NOT call
* enable_IRQ at the end of interrupt handling. However, this does
* nothing because there is not enough information provided to do
* the EOI HvCall. This is done by XmPciLpEvent.c
*/
static void iSeries_end_IRQ(unsigned int irq)
{
}
#define PCIFR(...)
/*
* iSeries_pci.c
*
......@@ -49,171 +50,160 @@
#include "iSeries_IoMmTable.h"
#include "pci.h"
extern struct pci_controller* hose_head;
extern struct pci_controller** hose_tail;
extern int global_phb_number;
extern int panic_timeout;
extern struct pci_controller *hose_head;
extern struct pci_controller **hose_tail;
extern int global_phb_number;
extern int panic_timeout;
extern struct device_node *allnodes;
extern unsigned long iSeries_Base_Io_Memory;
extern struct pci_ops iSeries_pci_ops;
extern struct flightRecorder* PciFr;
extern struct TceTable* tceTables[256];
/*******************************************************************
* Counters and control flags.
*******************************************************************/
extern long Pci_Io_Read_Count;
extern long Pci_Io_Write_Count;
extern long Pci_Cfg_Read_Count;
extern long Pci_Cfg_Write_Count;
extern long Pci_Error_Count;
extern int Pci_Retry_Max;
extern int Pci_Error_Flag;
extern int Pci_Trace_Flag;
extern struct flightRecorder *PciFr;
extern struct TceTable *tceTables[256];
extern void iSeries_MmIoTest(void);
/*******************************************************************
/*
* Forward declares of prototypes.
*******************************************************************/
struct iSeries_Device_Node* find_Device_Node(struct pci_dev* PciDev);
struct iSeries_Device_Node* get_Device_Node(struct pci_dev* PciDev);
*/
static struct iSeries_Device_Node *find_Device_Node(struct pci_dev *PciDev);
unsigned long find_and_init_phbs(void);
struct pci_controller* alloc_phb(struct device_node *dev, char *model, unsigned int addr_size_words) ;
void iSeries_Scan_PHBs_Slots(struct pci_controller* Phb);
void iSeries_Scan_EADs_Bridge(HvBusNumber Bus, HvSubBusNumber SubBus, int IdSel);
int iSeries_Scan_Bridge_Slot(HvBusNumber Bus, struct HvCallPci_BridgeInfo* Info);
void list_device_nodes(void);
struct pci_dev;
static void iSeries_Scan_PHBs_Slots(struct pci_controller *Phb);
static void iSeries_Scan_EADs_Bridge(HvBusNumber Bus, HvSubBusNumber SubBus,
int IdSel);
static int iSeries_Scan_Bridge_Slot(HvBusNumber Bus,
struct HvCallPci_BridgeInfo *Info);
LIST_HEAD(iSeries_Global_Device_List);
int DeviceCount = 0;
static int DeviceCount;
/* Counters and control flags. */
static long Pci_Io_Read_Count = 0;
static long Pci_Io_Write_Count = 0;
static long Pci_Cfg_Read_Count = 0;
static long Pci_Cfg_Write_Count= 0;
static long Pci_Error_Count = 0;
static int Pci_Retry_Max = 3; /* Only retry 3 times */
static int Pci_Error_Flag = 1; /* Set Retry Error on. */
static int Pci_Trace_Flag = 0;
static long Pci_Io_Read_Count;
static long Pci_Io_Write_Count;
#if 0
static long Pci_Cfg_Read_Count;
static long Pci_Cfg_Write_Count;
#endif
static long Pci_Error_Count;
static int Pci_Retry_Max = 3; /* Only retry 3 times */
static int Pci_Error_Flag = 1; /* Set Retry Error on. */
static int Pci_Trace_Flag;
/**********************************************************************************
/*
* Log error info in the Flight Recorder and to the system console.
* Filter out the device not there errors.
* PCI: EADs Connect Failed 0x18.58.10 Rc: 0x00xx
* PCI: Read Vendor Failed 0x18.58.10 Rc: 0x00xx
* PCI: Connect Bus Unit Failed 0x18.58.10 Rc: 0x00xx
**********************************************************************************/
void pci_Log_Error(char* Error_Text, int Bus, int SubBus, int AgentId, int HvRc)
*/
static void pci_Log_Error(char *Error_Text, int Bus, int SubBus,
int AgentId, int HvRc)
{
if( HvRc != 0x0302) {
if (HvRc != 0x0302) {
char ErrorString[128];
sprintf(ErrorString,"%s Failed: 0x%02X.%02X.%02X Rc: 0x%04X",Error_Text,Bus,SubBus,AgentId,HvRc);
sprintf(ErrorString, "%s Failed: 0x%02X.%02X.%02X Rc: 0x%04X",
Error_Text, Bus, SubBus, AgentId, HvRc);
PCIFR(ErrorString);
printk("PCI: %s\n",ErrorString);
printk("PCI: %s\n", ErrorString);
}
}
/**********************************************************************************
#if 0
/*
* Dump the iSeries Temp Device Node
*<4>buswalk [swapper : - DeviceNode: 0xC000000000634300
*<4>00. Device Node = 0xC000000000634300
*<4> - PciDev = 0x0000000000000000
*<4> - tDevice = 0x 17:01.00 0x1022 00
*<4> 4. Device Node = 0xC000000000634480
*<4> - PciDev = 0x0000000000000000
*<4> - Device = 0x 18:38.16 Irq:0xA7 Vendor:0x1014 Flags:0x00
*<4> - Devfn = 0xB0: 22.18
**********************************************************************************/
void dumpDevice_Node(struct iSeries_Device_Node* DevNode)
{
udbg_printf("Device Node = 0x%p\n",DevNode);
udbg_printf(" - PciDev = 0x%p\n",DevNode->PciDev);
* <4>buswalk [swapper : - DeviceNode: 0xC000000000634300
* <4>00. Device Node = 0xC000000000634300
* <4> - PciDev = 0x0000000000000000
* <4> - tDevice = 0x 17:01.00 0x1022 00
* <4> 4. Device Node = 0xC000000000634480
* <4> - PciDev = 0x0000000000000000
* <4> - Device = 0x 18:38.16 Irq:0xA7 Vendor:0x1014 Flags:0x00
* <4> - Devfn = 0xB0: 22.18
*/
void dumpDevice_Node(struct iSeries_Device_Node *DevNode)
{
udbg_printf("Device Node = 0x%p\n", DevNode);
udbg_printf(" - PciDev = 0x%p\n", DevNode->PciDev);
udbg_printf(" - Device = 0x%4X:%02X.%02X (0x%02X)\n",
ISERIES_BUS(DevNode),
ISERIES_SUBBUS(DevNode),
DevNode->AgentId,
DevNode->DevFn);
udbg_printf(" - LSlot = 0x%02X\n",DevNode->LogicalSlot);
udbg_printf(" - TceTable = 0x%p\n ",DevNode->DevTceTable);
udbg_printf(" - DSA = 0x%04X\n",ISERIES_DSA(DevNode)>>32 );
ISERIES_BUS(DevNode), ISERIES_SUBBUS(DevNode),
DevNode->AgentId, DevNode->DevFn);
udbg_printf(" - LSlot = 0x%02X\n", DevNode->LogicalSlot);
udbg_printf(" - TceTable = 0x%p\n ", DevNode->DevTceTable);
udbg_printf(" - DSA = 0x%04X\n", ISERIES_DSA(DevNode) >> 32);
udbg_printf(" = Irq:0x%02X Vendor:0x%04X Flags:0x%02X\n",
DevNode->Irq,
DevNode->Vendor,
DevNode->Flags );
udbg_printf(" - Location = %s\n",DevNode->CardLocation);
DevNode->Irq, DevNode->Vendor, DevNode->Flags);
udbg_printf(" - Location = %s\n", DevNode->CardLocation);
}
/**********************************************************************************
/*
* Walk down the device node chain
**********************************************************************************/
void list_device_nodes(void)
*/
static void list_device_nodes(void)
{
struct list_head* Device_Node_Ptr = iSeries_Global_Device_List.next;
while(Device_Node_Ptr != &iSeries_Global_Device_List) {
dumpDevice_Node( (struct iSeries_Device_Node*)Device_Node_Ptr );
struct list_head *Device_Node_Ptr = iSeries_Global_Device_List.next;
while (Device_Node_Ptr != &iSeries_Global_Device_List) {
dumpDevice_Node((struct iSeries_Device_Node*)Device_Node_Ptr);
Device_Node_Ptr = Device_Node_Ptr->next;
}
}
#endif
/***********************************************************************
/*
* build_device_node(u16 Bus, int SubBus, u8 DevFn)
*
***********************************************************************/
struct iSeries_Device_Node* build_device_node(HvBusNumber Bus, HvSubBusNumber SubBus, int AgentId, int Function)
*/
static struct iSeries_Device_Node *build_device_node(HvBusNumber Bus,
HvSubBusNumber SubBus, int AgentId, int Function)
{
struct iSeries_Device_Node* DeviceNode;
struct iSeries_Device_Node *DeviceNode;
PPCDBG(PPCDBG_BUSWALK,"-build_device_node 0x%02X.%02X.%02X Function: %02X\n",Bus,SubBus,AgentId, Function);
PPCDBG(PPCDBG_BUSWALK,
"-build_device_node 0x%02X.%02X.%02X Function: %02X\n",
Bus, SubBus, AgentId, Function);
DeviceNode = kmalloc(sizeof(struct iSeries_Device_Node), GFP_KERNEL);
if(DeviceNode == NULL) return NULL;
memset(DeviceNode,0,sizeof(struct iSeries_Device_Node) );
list_add_tail(&DeviceNode->Device_List,&iSeries_Global_Device_List);
/*DeviceNode->DsaAddr = ((u64)Bus<<48)+((u64)SubBus<<40)+((u64)0x10<<32); */
ISERIES_BUS(DeviceNode) = Bus;
ISERIES_SUBBUS(DeviceNode) = SubBus;
DeviceNode->DsaAddr.deviceId = 0x10;
DeviceNode->DsaAddr.barNumber = 0;
DeviceNode->AgentId = AgentId;
DeviceNode->DevFn = PCI_DEVFN(ISERIES_ENCODE_DEVICE(AgentId),Function );
DeviceNode->IoRetry = 0;
if (DeviceNode == NULL)
return NULL;
memset(DeviceNode, 0, sizeof(struct iSeries_Device_Node));
list_add_tail(&DeviceNode->Device_List, &iSeries_Global_Device_List);
/* DeviceNode->DsaAddr =
((u64)Bus << 48) + ((u64)SubBus << 40) + ((u64)0x10 << 32); */
ISERIES_BUS(DeviceNode) = Bus;
ISERIES_SUBBUS(DeviceNode) = SubBus;
DeviceNode->DsaAddr.deviceId = 0x10;
DeviceNode->DsaAddr.barNumber = 0;
DeviceNode->AgentId = AgentId;
DeviceNode->DevFn = PCI_DEVFN(ISERIES_ENCODE_DEVICE(AgentId), Function);
DeviceNode->IoRetry = 0;
iSeries_Get_Location_Code(DeviceNode);
PCIFR("Device 0x%02X.%2X, Node:0x%p ",ISERIES_BUS(DeviceNode),ISERIES_DEVFUN(DeviceNode),DeviceNode);
PCIFR("Device 0x%02X.%2X, Node:0x%p ", ISERIES_BUS(DeviceNode),
ISERIES_DEVFUN(DeviceNode), DeviceNode);
return DeviceNode;
}
/****************************************************************************
*
* Allocate pci_controller(phb) initialized common variables.
*
*****************************************************************************/
struct pci_controller* pci_alloc_pci_controllerX(char *model, enum phb_types controller_type)
/*
* Allocate pci_controller(phb) initialized common variables.
*/
static struct pci_controller *pci_alloc_pci_controllerX(char *model,
enum phb_types controller_type)
{
struct pci_controller *hose;
hose = (struct pci_controller*)kmalloc(sizeof(struct pci_controller), GFP_KERNEL);
if(hose == NULL) return NULL;
hose = (struct pci_controller *)kmalloc(sizeof(struct pci_controller),
GFP_KERNEL);
if (hose == NULL)
return NULL;
memset(hose, 0, sizeof(struct pci_controller));
if(strlen(model) < 8) strcpy(hose->what,model);
else memcpy(hose->what,model,7);
if (strlen(model) < 8)
strcpy(hose->what, model);
else
memcpy(hose->what, model, 7);
hose->type = controller_type;
hose->global_number = global_phb_number;
global_phb_number++;
......@@ -223,8 +213,7 @@ struct pci_controller* pci_alloc_pci_controllerX(char *model, enum phb_types con
return hose;
}
/****************************************************************************
*
/*
* unsigned int __init find_and_init_phbs(void)
*
* Description:
......@@ -232,363 +221,388 @@ struct pci_controller* pci_alloc_pci_controllerX(char *model, enum phb_types con
* PCI buses. The system hypervisor is queried as to the guest partition
* ownership status. A pci_controller is build for any bus which is partially
* owned or fully owned by this guest partition.
****************************************************************************/
*/
unsigned long __init find_and_init_phbs(void)
{
struct pci_controller* phb;
struct pci_controller *phb;
HvBusNumber BusNumber;
PPCDBG(PPCDBG_BUSWALK,"find_and_init_phbs Entry\n");
PPCDBG(PPCDBG_BUSWALK, "find_and_init_phbs Entry\n");
/* Check all possible buses. */
for (BusNumber = 0; BusNumber < 256; BusNumber++) {
int RtnCode = HvCallXm_testBus(BusNumber);
if (RtnCode == 0) {
phb = pci_alloc_pci_controllerX("PHB HV", phb_type_hypervisor);
if(phb == NULL) {
phb = pci_alloc_pci_controllerX("PHB HV",
phb_type_hypervisor);
if (phb == NULL) {
printk("PCI: Allocate pci_controller failed.\n");
PCIFR( "Allocate pci_controller failed.");
PCIFR("Allocate pci_controller failed.");
return -1;
}
phb->pci_mem_offset = phb->local_number = BusNumber;
phb->first_busno = BusNumber;
phb->last_busno = BusNumber;
phb->ops = &iSeries_pci_ops;
phb->first_busno = BusNumber;
phb->last_busno = BusNumber;
phb->ops = &iSeries_pci_ops;
PPCDBG(PPCDBG_BUSWALK, "PCI:Create iSeries pci_controller(%p), Bus: %04X\n",phb,BusNumber);
PCIFR("Create iSeries PHB controller: %04X",BusNumber);
PPCDBG(PPCDBG_BUSWALK, "PCI:Create iSeries pci_controller(%p), Bus: %04X\n",
phb, BusNumber);
PCIFR("Create iSeries PHB controller: %04X", BusNumber);
/***************************************************/
/* Find and connect the devices. */
/***************************************************/
/* Find and connect the devices. */
iSeries_Scan_PHBs_Slots(phb);
}
/* Check for Unexpected Return code, a clue that something */
/* has gone wrong. */
else if(RtnCode != 0x0301) {
PCIFR("Unexpected Return on Probe(0x%04X): 0x%04X",BusNumber,RtnCode);
}
/*
* Check for Unexpected Return code, a clue that something
* has gone wrong.
*/
else if (RtnCode != 0x0301)
PCIFR("Unexpected Return on Probe(0x%04X): 0x%04X",
BusNumber, RtnCode);
}
return 0;
}
/***********************************************************************
/*
* iSeries_pcibios_init
*
* Chance to initialize and structures or variable before PCI Bus walk.
*
*<4>buswalk [swapper : iSeries_pcibios_init Entry.
*<4>buswalk [swapper : IoMmTable Initialized 0xC00000000034BD30
*<4>buswalk [swapper : find_and_init_phbs Entry
*<4>buswalk [swapper : Create iSeries pci_controller:(0xC00000001F5C7000), Bus 0x0017
*<4>buswalk [swapper : Connect EADs: 0x17.00.12 = 0x00
*<4>buswalk [swapper : iSeries_assign_IRQ 0x0017.00.12 = 0x0091
*<4>buswalk [swapper : - allocate and assign IRQ 0x17.00.12 = 0x91
*<4>buswalk [swapper : - FoundDevice: 0x17.28.10 = 0x12AE
*<4>buswalk [swapper : - build_device_node 0x17.28.12
*<4>buswalk [swapper : iSeries_pcibios_init Exit.
***********************************************************************/
* <4>buswalk [swapper : iSeries_pcibios_init Entry.
* <4>buswalk [swapper : IoMmTable Initialized 0xC00000000034BD30
* <4>buswalk [swapper : find_and_init_phbs Entry
* <4>buswalk [swapper : Create iSeries pci_controller:(0xC00000001F5C7000), Bus 0x0017
* <4>buswalk [swapper : Connect EADs: 0x17.00.12 = 0x00
* <4>buswalk [swapper : iSeries_assign_IRQ 0x0017.00.12 = 0x0091
* <4>buswalk [swapper : - allocate and assign IRQ 0x17.00.12 = 0x91
* <4>buswalk [swapper : - FoundDevice: 0x17.28.10 = 0x12AE
* <4>buswalk [swapper : - build_device_node 0x17.28.12
* <4>buswalk [swapper : iSeries_pcibios_init Exit.
*/
void iSeries_pcibios_init(void)
{
PPCDBG(PPCDBG_BUSWALK,"iSeries_pcibios_init Entry.\n");
PPCDBG(PPCDBG_BUSWALK, "iSeries_pcibios_init Entry.\n");
iSeries_IoMmTable_Initialize();
find_and_init_phbs();
pci_assign_all_busses = 0;
PPCDBG(PPCDBG_BUSWALK,"iSeries_pcibios_init Exit.\n");
/* pci_assign_all_busses = 0; SFRXXX*/
PPCDBG(PPCDBG_BUSWALK, "iSeries_pcibios_init Exit.\n");
}
/***********************************************************************
/*
* pcibios_final_fixup(void)
***********************************************************************/
*/
void __init pcibios_final_fixup(void)
{
struct pci_dev* PciDev = NULL;
struct iSeries_Device_Node* DeviceNode;
char Buffer[256];
int DeviceCount = 0;
struct pci_dev *PciDev = NULL;
struct iSeries_Device_Node *DeviceNode;
char Buffer[256];
int DeviceCount = 0;
PPCDBG(PPCDBG_BUSWALK, "iSeries_pcibios_fixup Entry.\n");
PPCDBG(PPCDBG_BUSWALK,"iSeries_pcibios_fixup Entry.\n");
/******************************************************/
/* Fix up at the device node and pci_dev relationship */
/******************************************************/
mf_displaySrc(0xC9000100);
while ((PciDev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, PciDev)) != NULL) {
while ((PciDev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, PciDev))
!= NULL) {
DeviceNode = find_Device_Node(PciDev);
if(DeviceNode != NULL) {
if (DeviceNode != NULL) {
++DeviceCount;
PciDev->sysdata = (void*)DeviceNode;
PciDev->sysdata = (void *)DeviceNode;
DeviceNode->PciDev = PciDev;
PPCDBG(PPCDBG_BUSWALK,"PciDev 0x%p <==> DevNode 0x%p\n",PciDev,DeviceNode );
PPCDBG(PPCDBG_BUSWALK,
"PciDev 0x%p <==> DevNode 0x%p\n",
PciDev, DeviceNode);
iSeries_allocateDeviceBars(PciDev);
iSeries_Device_Information(PciDev,Buffer, sizeof(Buffer) );
printk("%d. %s\n",DeviceCount,Buffer);
iSeries_Device_Information(PciDev, Buffer,
sizeof(Buffer));
printk("%d. %s\n", DeviceCount, Buffer);
create_pci_bus_tce_table((unsigned long)DeviceNode);
} else {
printk("PCI: Device Tree not found for 0x%016lX\n",(unsigned long)PciDev);
}
} else
printk("PCI: Device Tree not found for 0x%016lX\n",
(unsigned long)PciDev);
}
iSeries_IoMmTable_Status();
iSeries_activate_IRQs();
mf_displaySrc(0xC9000200);
}
void pcibios_fixup_bus(struct pci_bus* PciBus)
void pcibios_fixup_bus(struct pci_bus *PciBus)
{
PPCDBG(PPCDBG_BUSWALK,"iSeries_pcibios_fixup_bus(0x%04X) Entry.\n",PciBus->number);
PPCDBG(PPCDBG_BUSWALK, "iSeries_pcibios_fixup_bus(0x%04X) Entry.\n",
PciBus->number);
}
/***********************************************************************
* pcibios_fixup_resources(struct pci_dev *dev)
*
***********************************************************************/
void pcibios_fixup_resources(struct pci_dev *PciDev)
{
PPCDBG(PPCDBG_BUSWALK,"pcibios_fixup_resources PciDev %p\n",PciDev);
PPCDBG(PPCDBG_BUSWALK, "fixup_resources PciDev %p\n", PciDev);
}
/********************************************************************************
* Loop through each node function to find usable EADs bridges.
*********************************************************************************/
void iSeries_Scan_PHBs_Slots(struct pci_controller* Phb)
/*
* Loop through each node function to find usable EADs bridges.
*/
static void iSeries_Scan_PHBs_Slots(struct pci_controller *Phb)
{
struct HvCallPci_DeviceInfo* DevInfo;
HvBusNumber Bus = Phb->local_number; /* System Bus */
HvSubBusNumber SubBus = 0; /* EADs is always 0. */
int HvRc = 0;
int IdSel = 1;
int MaxAgents = 8;
struct HvCallPci_DeviceInfo *DevInfo;
HvBusNumber Bus = Phb->local_number; /* System Bus */
HvSubBusNumber SubBus = 0; /* EADs is always 0. */
int HvRc = 0;
int IdSel = 1;
int MaxAgents = 8;
DevInfo = (struct HvCallPci_DeviceInfo*)kmalloc(sizeof(struct HvCallPci_DeviceInfo), GFP_KERNEL);
if(DevInfo == NULL) return;
DevInfo = (struct HvCallPci_DeviceInfo*)
kmalloc(sizeof(struct HvCallPci_DeviceInfo), GFP_KERNEL);
if (DevInfo == NULL)
return;
/********************************************************************************
/*
* Probe for EADs Bridges
********************************************************************************/
*/
for (IdSel=1; IdSel < MaxAgents; ++IdSel) {
HvRc = HvCallPci_getDeviceInfo(Bus, SubBus, IdSel,REALADDR(DevInfo), sizeof(struct HvCallPci_DeviceInfo));
HvRc = HvCallPci_getDeviceInfo(Bus, SubBus, IdSel,
REALADDR(DevInfo),
sizeof(struct HvCallPci_DeviceInfo));
if (HvRc == 0) {
if(DevInfo->deviceType == HvCallPci_NodeDevice) {
if (DevInfo->deviceType == HvCallPci_NodeDevice)
iSeries_Scan_EADs_Bridge(Bus, SubBus, IdSel);
}
else printk("PCI: Invalid System Configuration(0x%02X.\n",DevInfo->deviceType);
else
printk("PCI: Invalid System Configuration(0x%02X.\n",
DevInfo->deviceType);
}
else pci_Log_Error("getDeviceInfo",Bus, SubBus, IdSel,HvRc);
else
pci_Log_Error("getDeviceInfo",Bus, SubBus, IdSel,HvRc);
}
kfree(DevInfo);
}
/********************************************************************************
*
*********************************************************************************/
void iSeries_Scan_EADs_Bridge(HvBusNumber Bus, HvSubBusNumber SubBus, int IdSel)
static void iSeries_Scan_EADs_Bridge(HvBusNumber Bus, HvSubBusNumber SubBus,
int IdSel)
{
struct HvCallPci_BridgeInfo* BridgeInfo;
HvAgentId AgentId;
int Function;
int HvRc;
struct HvCallPci_BridgeInfo *BridgeInfo;
HvAgentId AgentId;
int Function;
int HvRc;
BridgeInfo = (struct HvCallPci_BridgeInfo*)kmalloc(sizeof(struct HvCallPci_BridgeInfo), GFP_KERNEL);
if(BridgeInfo == NULL) return;
BridgeInfo = (struct HvCallPci_BridgeInfo *)
kmalloc(sizeof(struct HvCallPci_BridgeInfo), GFP_KERNEL);
if (BridgeInfo == NULL)
return;
/*********************************************************************
* Note: hvSubBus and irq is always be 0 at this level!
*********************************************************************/
for (Function=0; Function < 8; ++Function) {
/* Note: hvSubBus and irq is always be 0 at this level! */
for (Function = 0; Function < 8; ++Function) {
AgentId = ISERIES_PCI_AGENTID(IdSel, Function);
HvRc = HvCallXm_connectBusUnit(Bus, SubBus, AgentId, 0);
if (HvRc == 0) {
/* Connect EADs: 0x18.00.12 = 0x00 */
PPCDBG(PPCDBG_BUSWALK,"PCI:Connect EADs: 0x%02X.%02X.%02X\n",Bus, SubBus, AgentId);
PCIFR( "Connect EADs: 0x%02X.%02X.%02X", Bus, SubBus, AgentId);
HvRc = HvCallPci_getBusUnitInfo(Bus, SubBus, AgentId,
REALADDR(BridgeInfo), sizeof(struct HvCallPci_BridgeInfo));
PPCDBG(PPCDBG_BUSWALK,
"PCI:Connect EADs: 0x%02X.%02X.%02X\n",
Bus, SubBus, AgentId);
PCIFR("Connect EADs: 0x%02X.%02X.%02X",
Bus, SubBus, AgentId);
HvRc = HvCallPci_getBusUnitInfo(Bus, SubBus, AgentId,
REALADDR(BridgeInfo),
sizeof(struct HvCallPci_BridgeInfo));
if (HvRc == 0) {
PPCDBG(PPCDBG_BUSWALK,"PCI: BridgeInfo, Type:0x%02X, SubBus:0x%02X, MaxAgents:0x%02X, MaxSubBus: 0x%02X, LSlot: 0x%02X\n",
BridgeInfo->busUnitInfo.deviceType,
BridgeInfo->subBusNumber,
BridgeInfo->maxAgents,
BridgeInfo->maxSubBusNumber,
BridgeInfo->logicalSlotNumber);
PCIFR( "BridgeInfo, Type:0x%02X, SubBus:0x%02X, MaxAgents:0x%02X, MaxSubBus: 0x%02X, LSlot: 0x%02X",
BridgeInfo->busUnitInfo.deviceType,
BridgeInfo->subBusNumber,
BridgeInfo->maxAgents,
BridgeInfo->maxSubBusNumber,
BridgeInfo->logicalSlotNumber);
if (BridgeInfo->busUnitInfo.deviceType == HvCallPci_BridgeDevice) {
PPCDBG(PPCDBG_BUSWALK,
"PCI: BridgeInfo, Type:0x%02X, SubBus:0x%02X, MaxAgents:0x%02X, MaxSubBus: 0x%02X, LSlot: 0x%02X\n",
BridgeInfo->busUnitInfo.deviceType,
BridgeInfo->subBusNumber,
BridgeInfo->maxAgents,
BridgeInfo->maxSubBusNumber,
BridgeInfo->logicalSlotNumber);
PCIFR("BridgeInfo, Type:0x%02X, SubBus:0x%02X, MaxAgents:0x%02X, MaxSubBus: 0x%02X, LSlot: 0x%02X",
BridgeInfo->busUnitInfo.deviceType,
BridgeInfo->subBusNumber,
BridgeInfo->maxAgents,
BridgeInfo->maxSubBusNumber,
BridgeInfo->logicalSlotNumber);
if (BridgeInfo->busUnitInfo.deviceType ==
HvCallPci_BridgeDevice) {
/* Scan_Bridge_Slot...: 0x18.00.12 */
iSeries_Scan_Bridge_Slot(Bus,BridgeInfo);
}
else printk("PCI: Invalid Bridge Configuration(0x%02X)",BridgeInfo->busUnitInfo.deviceType);
iSeries_Scan_Bridge_Slot(Bus,
BridgeInfo);
} else
printk("PCI: Invalid Bridge Configuration(0x%02X)",
BridgeInfo->busUnitInfo.deviceType);
}
}
else if(HvRc != 0x000B) pci_Log_Error("EADs Connect",Bus,SubBus,AgentId,HvRc);
else if (HvRc != 0x000B)
pci_Log_Error("EADs Connect",
Bus, SubBus, AgentId, HvRc);
}
kfree(BridgeInfo);
}
/********************************************************************************
*
* This assumes that the node slot is always on the primary bus!
*
*********************************************************************************/
int iSeries_Scan_Bridge_Slot(HvBusNumber Bus, struct HvCallPci_BridgeInfo* BridgeInfo)
/*
* This assumes that the node slot is always on the primary bus!
*/
static int iSeries_Scan_Bridge_Slot(HvBusNumber Bus,
struct HvCallPci_BridgeInfo *BridgeInfo)
{
struct iSeries_Device_Node* DeviceNode;
struct iSeries_Device_Node *DeviceNode;
HvSubBusNumber SubBus = BridgeInfo->subBusNumber;
u16 VendorId = 0;
int HvRc = 0;
u8 Irq = 0;
int IdSel = ISERIES_GET_DEVICE_FROM_SUBBUS(SubBus);
int Function = ISERIES_GET_FUNCTION_FROM_SUBBUS(SubBus);
HvAgentId AgentId = ISERIES_PCI_AGENTID(IdSel, Function);
HvAgentId EADsIdSel = ISERIES_PCI_AGENTID(IdSel, Function);
int FirstSlotId = 0;
/**********************************************************/
/* iSeries_allocate_IRQ.: 0x18.00.12(0xA3) */
/**********************************************************/
Irq = iSeries_allocate_IRQ(Bus, 0, AgentId);
u16 VendorId = 0;
int HvRc = 0;
u8 Irq = 0;
int IdSel = ISERIES_GET_DEVICE_FROM_SUBBUS(SubBus);
int Function = ISERIES_GET_FUNCTION_FROM_SUBBUS(SubBus);
HvAgentId AgentId = ISERIES_PCI_AGENTID(IdSel, Function);
HvAgentId EADsIdSel = ISERIES_PCI_AGENTID(IdSel, Function);
int FirstSlotId = 0;
/* iSeries_allocate_IRQ.: 0x18.00.12(0xA3) */
Irq = iSeries_allocate_IRQ(Bus, 0, AgentId);
iSeries_assign_IRQ(Irq, Bus, 0, AgentId);
PPCDBG(PPCDBG_BUSWALK,"PCI:- allocate and assign IRQ 0x%02X.%02X.%02X = 0x%02X\n",Bus, 0, AgentId, Irq );
PPCDBG(PPCDBG_BUSWALK,
"PCI:- allocate and assign IRQ 0x%02X.%02X.%02X = 0x%02X\n",
Bus, 0, AgentId, Irq);
/****************************************************************************
/*
* Connect all functions of any device found.
****************************************************************************/
*/
for (IdSel = 1; IdSel <= BridgeInfo->maxAgents; ++IdSel) {
for (Function = 0; Function < 8; ++Function) {
AgentId = ISERIES_PCI_AGENTID(IdSel, Function);
HvRc = HvCallXm_connectBusUnit(Bus, SubBus, AgentId, Irq);
if( HvRc == 0) {
HvRc = HvCallPci_configLoad16(Bus, SubBus, AgentId, PCI_VENDOR_ID, &VendorId);
if( HvRc == 0) {
/**********************************************************/
/* FoundDevice: 0x18.28.10 = 0x12AE */
/**********************************************************/
PPCDBG(PPCDBG_BUSWALK,"PCI:- FoundDevice: 0x%02X.%02X.%02X = 0x%04X\n",
Bus, SubBus, AgentId, VendorId);
HvRc = HvCallPci_configStore8(Bus, SubBus, AgentId, PCI_INTERRUPT_LINE, Irq);
if( HvRc != 0) {
pci_Log_Error("PciCfgStore Irq Failed!",Bus,SubBus,AgentId,HvRc);
}
HvRc = HvCallXm_connectBusUnit(Bus, SubBus,
AgentId, Irq);
if (HvRc == 0) {
HvRc = HvCallPci_configLoad16(Bus, SubBus,
AgentId, PCI_VENDOR_ID,
&VendorId);
if (HvRc == 0) {
/* FoundDevice: 0x18.28.10 = 0x12AE */
PPCDBG(PPCDBG_BUSWALK,
"PCI:- FoundDevice: 0x%02X.%02X.%02X = 0x%04X\n",
Bus, SubBus, AgentId, VendorId);
HvRc = HvCallPci_configStore8(Bus,
SubBus, AgentId,
PCI_INTERRUPT_LINE, Irq);
if (HvRc != 0)
pci_Log_Error("PciCfgStore Irq Failed!",
Bus, SubBus,
AgentId, HvRc);
++DeviceCount;
DeviceNode = build_device_node(Bus, SubBus, EADsIdSel, Function);
DeviceNode->Vendor = VendorId;
DeviceNode->Irq = Irq;
DeviceNode->LogicalSlot = BridgeInfo->logicalSlotNumber;
DeviceNode = build_device_node(Bus,
SubBus, EADsIdSel,
Function);
DeviceNode->Vendor = VendorId;
DeviceNode->Irq = Irq;
DeviceNode->LogicalSlot =
BridgeInfo->logicalSlotNumber;
PCIFR("Device(%4d): 0x%02X.%02X.%02X 0x%02X 0x%04X",
DeviceCount,Bus, SubBus, AgentId,
DeviceNode->LogicalSlot,DeviceNode->Vendor);
/***********************************************************
* On the first device/function, assign irq to slot
***********************************************************/
if(Function == 0) {
DeviceCount, Bus, SubBus,
AgentId,
DeviceNode->LogicalSlot,DeviceNode->Vendor);
/*
* On the first device/function,
* assign irq to slot
*/
if (Function == 0) {
FirstSlotId = AgentId;
// AHT iSeries_assign_IRQ(Irq, Bus, SubBus, AgentId);
/* AHT iSeries_assign_IRQ(Irq,
Bus, SubBus, AgentId); */
}
}
else pci_Log_Error("Read Vendor",Bus,SubBus,AgentId,HvRc);
}
else pci_Log_Error("Connect Bus Unit",Bus,SubBus, AgentId,HvRc);
} else
pci_Log_Error("Read Vendor",
Bus, SubBus, AgentId, HvRc);
} else
pci_Log_Error("Connect Bus Unit",
Bus, SubBus, AgentId, HvRc);
} /* for (Function = 0; Function < 8; ++Function) */
} /* for (IdSel = 1; IdSel <= MaxAgents; ++IdSel) */
return HvRc;
}
/************************************************************************/
/* I/0 Memory copy MUST use mmio commands on iSeries */
/* To do; For performance, include the hv call directly */
/************************************************************************/
void* iSeries_memset_io(void* dest, char c, size_t Count)
{
u8 ByteValue = c;
long NumberOfBytes = Count;
char* IoBuffer = dest;
while(NumberOfBytes > 0) {
iSeries_Write_Byte( ByteValue, (void*)IoBuffer );
/*
* I/0 Memory copy MUST use mmio commands on iSeries
* To do; For performance, include the hv call directly
*/
void *iSeries_memset_io(void *dest, char c, size_t Count)
{
u8 ByteValue = c;
long NumberOfBytes = Count;
char *IoBuffer = dest;
while (NumberOfBytes > 0) {
iSeries_Write_Byte(ByteValue, (void *)IoBuffer);
++IoBuffer;
-- NumberOfBytes;
}
return dest;
}
void* iSeries_memcpy_toio(void *dest, void *source, size_t count)
{
char *dst = dest;
char *src = source;
long NumberOfBytes = count;
while(NumberOfBytes > 0) {
iSeries_Write_Byte(*src++, (void*)dst++);
}
void *iSeries_memcpy_toio(void *dest, void *source, size_t count)
{
char *dst = dest;
char *src = source;
long NumberOfBytes = count;
while (NumberOfBytes > 0) {
iSeries_Write_Byte(*src++, (void *)dst++);
-- NumberOfBytes;
}
return dest;
}
void* iSeries_memcpy_fromio(void *dest, void *source, size_t count)
void *iSeries_memcpy_fromio(void *dest, void *source, size_t count)
{
char *dst = dest;
char *src = source;
long NumberOfBytes = count;
while(NumberOfBytes > 0) {
*dst++ = iSeries_Read_Byte( (void*)src++);
long NumberOfBytes = count;
while (NumberOfBytes > 0) {
*dst++ = iSeries_Read_Byte((void *)src++);
-- NumberOfBytes;
}
return dest;
}
/**********************************************************************************
/*
* Look down the chain to find the matching Device Device
**********************************************************************************/
struct iSeries_Device_Node* find_Device_Node(struct pci_dev* PciDev)
*/
static struct iSeries_Device_Node *find_Device_Node(struct pci_dev *PciDev)
{
struct list_head* Device_Node_Ptr = iSeries_Global_Device_List.next;
int Bus = PciDev->bus->number;
struct list_head *Device_Node_Ptr = iSeries_Global_Device_List.next;
int Bus = PciDev->bus->number;
int DevFn = PciDev->devfn;
while(Device_Node_Ptr != &iSeries_Global_Device_List) {
struct iSeries_Device_Node* DevNode = (struct iSeries_Device_Node*)Device_Node_Ptr;
if(Bus == ISERIES_BUS(DevNode) && DevFn == DevNode->DevFn) {
while (Device_Node_Ptr != &iSeries_Global_Device_List) {
struct iSeries_Device_Node *DevNode =
(struct iSeries_Device_Node*)Device_Node_Ptr;
if ((Bus == ISERIES_BUS(DevNode)) && (DevFn == DevNode->DevFn))
return DevNode;
}
Device_Node_Ptr = Device_Node_Ptr->next;
}
return NULL;
}
/******************************************************************/
/* Returns the device node for the passed pci_dev */
/* Sanity Check Node PciDev to passed pci_dev */
/* If none is found, returns a NULL which the client must handle. */
/******************************************************************/
struct iSeries_Device_Node* get_Device_Node(struct pci_dev* PciDev)
#if 0
/*
* Returns the device node for the passed pci_dev
* Sanity Check Node PciDev to passed pci_dev
* If none is found, returns a NULL which the client must handle.
*/
static struct iSeries_Device_Node *get_Device_Node(struct pci_dev *PciDev)
{
struct iSeries_Device_Node* Node;
Node = (struct iSeries_Device_Node*)PciDev->sysdata;
if(Node == NULL ) {
struct iSeries_Device_Node *Node;
Node = (struct iSeries_Device_Node *)PciDev->sysdata;
if (Node == NULL)
Node = find_Device_Node(PciDev);
}
else if(Node->PciDev != PciDev) {
else if (Node->PciDev != PciDev)
Node = find_Device_Node(PciDev);
}
return Node;
}
/**********************************************************************************
*
#endif
/*
* Read PCI Config Space Code
*
**********************************************************************************/
/** BYTE *************************************************************************/
*/
#if 0
/** BYTE ********************************************************************/
int iSeries_Node_read_config_byte(struct iSeries_Device_Node* DevNode, int Offset, u8* ReadValue)
{
u8 ReadData;
......@@ -606,63 +620,25 @@ int iSeries_Node_read_config_byte(struct iSeries_Device_Node* DevNode, int Offse
*ReadValue = ReadData;
return DevNode->ReturnCode;
}
/** WORD *************************************************************************/
int iSeries_Node_read_config_word(struct iSeries_Device_Node* DevNode, int Offset, u16* ReadValue)
{
u16 ReadData;
if(DevNode == NULL) { return 0x301; }
++Pci_Cfg_Read_Count;
DevNode->ReturnCode = HvCallPci_configLoad16(ISERIES_BUS(DevNode),ISERIES_SUBBUS(DevNode),0x10,
Offset,&ReadData);
if(Pci_Trace_Flag == 1) {
PCIFR("RCW: 0x%04X.%02X 0x%04X = 0x%04X",ISERIES_BUS(DevNode),DevNode->DevFn,Offset,ReadData);
}
if(DevNode->ReturnCode != 0 ) {
printk("PCI: RCW: 0x%04X.%02X Error: 0x%04X\n",ISERIES_BUS(DevNode),DevNode->DevFn,DevNode->ReturnCode);
PCIFR( "RCW: 0x%04X.%02X Error: 0x%04X", ISERIES_BUS(DevNode),DevNode->DevFn,DevNode->ReturnCode);
}
*ReadValue = ReadData;
return DevNode->ReturnCode;
}
/** DWORD *************************************************************************/
int iSeries_Node_read_config_dword(struct iSeries_Device_Node* DevNode, int Offset, u32* ReadValue)
{
u32 ReadData;
if(DevNode == NULL) { return 0x301; }
++Pci_Cfg_Read_Count;
DevNode->ReturnCode = HvCallPci_configLoad32(ISERIES_BUS(DevNode),ISERIES_SUBBUS(DevNode),0x10,
Offset,&ReadData);
if(Pci_Trace_Flag == 1) {
PCIFR("RCL: 0x%04X.%02X 0x%04X = 0x%08X",ISERIES_BUS(DevNode),DevNode->DevFn,Offset,ReadData);
}
if(DevNode->ReturnCode != 0 ) {
printk("PCI: RCL: 0x%04X.%02X Error: 0x%04X\n",ISERIES_BUS(DevNode),DevNode->DevFn,DevNode->ReturnCode);
PCIFR( "RCL: 0x%04X.%02X Error: 0x%04X", ISERIES_BUS(DevNode),DevNode->DevFn,DevNode->ReturnCode);
}
*ReadValue = ReadData;
return DevNode->ReturnCode;
}
int iSeries_pci_read_config_byte(struct pci_dev* PciDev, int Offset, u8* ReadValue) {
struct iSeries_Device_Node* DevNode = get_Device_Node(PciDev);
if(DevNode == NULL) return 0x0301;
return iSeries_Node_read_config_byte( DevNode ,Offset,ReadValue);
}
int iSeries_pci_read_config_word(struct pci_dev* PciDev, int Offset, u16* ReadValue) {
struct iSeries_Device_Node* DevNode = get_Device_Node(PciDev);
if(DevNode == NULL) return 0x0301;
return iSeries_Node_read_config_word( DevNode ,Offset,ReadValue );
}
int iSeries_pci_read_config_dword(struct pci_dev* PciDev, int Offset, u32* ReadValue) {
struct iSeries_Device_Node* DevNode = get_Device_Node(PciDev);
if(DevNode == NULL) return 0x0301;
return iSeries_Node_read_config_dword(DevNode ,Offset,ReadValue );
#endif
static int iSeries_pci_read_config(struct pci_bus *bus, unsigned int devfn,
int offset, int size, u32 *val)
{
return PCIBIOS_DEVICE_NOT_FOUND;
}
/**********************************************************************************/
/* */
/* Write PCI Config Space */
/* */
/** BYTE *************************************************************************/
/*
* Write PCI Config Space
*/
#if 0
/** BYTE ********************************************************************/
int iSeries_Node_write_config_byte(struct iSeries_Device_Node* DevNode, int Offset, u8 WriteData)
{
++Pci_Cfg_Write_Count;
......@@ -677,228 +653,220 @@ int iSeries_Node_write_config_byte(struct iSeries_Device_Node* DevNode, int Offs
}
return DevNode->ReturnCode;
}
/** WORD *************************************************************************/
int iSeries_Node_write_config_word(struct iSeries_Device_Node* DevNode, int Offset, u16 WriteData)
{
++Pci_Cfg_Write_Count;
DevNode->ReturnCode = HvCallPci_configStore16(ISERIES_BUS(DevNode),ISERIES_SUBBUS(DevNode),0x10,
Offset,WriteData);
if(Pci_Trace_Flag == 1) {
PCIFR("WCW: 0x%04X.%02X 0x%04X = 0x%04X",ISERIES_BUS(DevNode),DevNode->DevFn,Offset,WriteData);
}
if(DevNode->ReturnCode != 0 ) {
printk("PCI: WCW: 0x%04X.%02X Error: 0x%04X\n",ISERIES_BUS(DevNode),DevNode->DevFn,DevNode->ReturnCode);
PCIFR( "WCW: 0x%04X.%02X Error: 0x%04X", ISERIES_BUS(DevNode),DevNode->DevFn,DevNode->ReturnCode);
}
return DevNode->ReturnCode;
}
/** DWORD *************************************************************************/
int iSeries_Node_write_config_dword(struct iSeries_Device_Node* DevNode, int Offset, u32 WriteData)
{
++Pci_Cfg_Write_Count;
DevNode->ReturnCode = HvCallPci_configStore32(ISERIES_BUS(DevNode),ISERIES_SUBBUS(DevNode),0x10,
Offset,WriteData);
if(Pci_Trace_Flag == 1) {
PCIFR("WCL: 0x%04X.%02X 0x%04X = 0x%08X",ISERIES_BUS(DevNode),DevNode->DevFn,Offset,WriteData);
}
if(DevNode->ReturnCode != 0 ) {
printk("PCI: WCL: 0x%04X.%02X Error: 0x%04X\n",ISERIES_BUS(DevNode),DevNode->DevFn,DevNode->ReturnCode);
PCIFR( "WCL: 0x%04X.%02X Error: 0x%04X", ISERIES_BUS(DevNode),DevNode->DevFn,DevNode->ReturnCode);
}
return DevNode->ReturnCode;
}
int iSeries_pci_write_config_byte( struct pci_dev* PciDev,int Offset, u8 WriteValue)
{
struct iSeries_Device_Node* DevNode = get_Device_Node(PciDev);
if(DevNode == NULL) return 0x0301;
return iSeries_Node_write_config_byte( DevNode,Offset,WriteValue);
}
int iSeries_pci_write_config_word( struct pci_dev* PciDev,int Offset,u16 WriteValue)
{
struct iSeries_Device_Node* DevNode = get_Device_Node(PciDev);
if(DevNode == NULL) return 0x0301;
return iSeries_Node_write_config_word( DevNode,Offset,WriteValue);
}
int iSeries_pci_write_config_dword(struct pci_dev* PciDev,int Offset,u32 WriteValue)
#endif
static int iSeries_pci_write_config(struct pci_bus *bus, unsigned int devfn,
int offset, int size, u32 val)
{
struct iSeries_Device_Node* DevNode = get_Device_Node(PciDev);
if(DevNode == NULL) return 0x0301;
return iSeries_Node_write_config_dword(DevNode,Offset,WriteValue);
return PCIBIOS_DEVICE_NOT_FOUND;
}
/************************************************************************/
/* Branch Table */
/************************************************************************/
struct pci_ops iSeries_pci_ops = {
iSeries_pci_read_config_byte,
iSeries_pci_read_config_word,
iSeries_pci_read_config_dword,
iSeries_pci_write_config_byte,
iSeries_pci_write_config_word,
iSeries_pci_write_config_dword
.read = iSeries_pci_read_config,
.write = iSeries_pci_write_config
};
/************************************************************************
/*
* Check Return Code
* -> On Failure, print and log information.
* Increment Retry Count, if exceeds max, panic partition.
* -> If in retry, print and log success
************************************************************************
*
* PCI: Device 23.90 ReadL I/O Error( 0): 0x1234
* PCI: Device 23.90 ReadL Retry( 1)
* PCI: Device 23.90 ReadL Retry Successful(1)
************************************************************************/
int CheckReturnCode(char* TextHdr, struct iSeries_Device_Node* DevNode, u64 RtnCode)
*/
int CheckReturnCode(char *TextHdr, struct iSeries_Device_Node *DevNode,
u64 RtnCode)
{
if(RtnCode != 0) {
if (RtnCode != 0) {
++Pci_Error_Count;
++DevNode->IoRetry;
PCIFR( "%s: Device 0x%04X:%02X I/O Error(%2d): 0x%04X",
TextHdr,ISERIES_BUS(DevNode),DevNode->DevFn,DevNode->IoRetry,(int)RtnCode);
PCIFR("%s: Device 0x%04X:%02X I/O Error(%2d): 0x%04X",
TextHdr, ISERIES_BUS(DevNode), DevNode->DevFn,
DevNode->IoRetry, (int)RtnCode);
printk("PCI: %s: Device 0x%04X:%02X I/O Error(%2d): 0x%04X\n",
TextHdr,ISERIES_BUS(DevNode),DevNode->DevFn,DevNode->IoRetry,(int)RtnCode);
/*******************************************************/
/* Bump the retry and check for retry count exceeded. */
/* If, Exceeded, panic the system. */
/*******************************************************/
if(DevNode->IoRetry > Pci_Retry_Max && Pci_Error_Flag > 0 ) {
TextHdr, ISERIES_BUS(DevNode), DevNode->DevFn,
DevNode->IoRetry, (int)RtnCode);
/*
* Bump the retry and check for retry count exceeded.
* If, Exceeded, panic the system.
*/
if ((DevNode->IoRetry > Pci_Retry_Max) &&
(Pci_Error_Flag > 0)) {
mf_displaySrc(0xB6000103);
panic_timeout = 0;
panic("PCI: Hardware I/O Error, SRC B6000103, Automatic Reboot Disabled.\n");
panic("PCI: Hardware I/O Error, SRC B6000103, "
"Automatic Reboot Disabled.\n");
}
return -1; /* Retry Try */
}
/********************************************************************
* If retry was in progress, log success and rest retry count *
*********************************************************************/
else if(DevNode->IoRetry > 0) {
/* If retry was in progress, log success and rest retry count */
if (DevNode->IoRetry > 0) {
PCIFR("%s: Device 0x%04X:%02X Retry Successful(%2d).",
TextHdr,ISERIES_BUS(DevNode),DevNode->DevFn,DevNode->IoRetry);
TextHdr, ISERIES_BUS(DevNode), DevNode->DevFn,
DevNode->IoRetry);
DevNode->IoRetry = 0;
return 0;
}
return 0;
}
/************************************************************************/
/* Translate the I/O Address into a device node, bar, and bar offset. */
/* Note: Make sure the passed variable end up on the stack to avoid */
/* the exposure of being device global. */
/************************************************************************/
static inline struct iSeries_Device_Node* xlateIoMmAddress(void* IoAddress,
union HvDsaMap* DsaPtr,
u64* BarOffsetPtr) {
unsigned long BaseIoAddr = (unsigned long)IoAddress-iSeries_Base_Io_Memory;
long TableIndex = BaseIoAddr/iSeries_IoMmTable_Entry_Size;
struct iSeries_Device_Node* DevNode = *(iSeries_IoMmTable +TableIndex);
if(DevNode != NULL) {
DsaPtr->DsaAddr = ISERIES_DSA(DevNode);
DsaPtr->Dsa.barNumber = *(iSeries_IoBarTable+TableIndex);
*BarOffsetPtr = BaseIoAddr % iSeries_IoMmTable_Entry_Size;
}
else {
/*
* Translate the I/O Address into a device node, bar, and bar offset.
* Note: Make sure the passed variable end up on the stack to avoid
* the exposure of being device global.
*/
static inline struct iSeries_Device_Node *xlateIoMmAddress(void *IoAddress,
union HvDsaMap *DsaPtr, u64 *BarOffsetPtr)
{
unsigned long BaseIoAddr =
(unsigned long)IoAddress - iSeries_Base_Io_Memory;
long TableIndex = BaseIoAddr / iSeries_IoMmTable_Entry_Size;
struct iSeries_Device_Node *DevNode = *(iSeries_IoMmTable + TableIndex);
if (DevNode != NULL) {
DsaPtr->DsaAddr = ISERIES_DSA(DevNode);
DsaPtr->Dsa.barNumber = *(iSeries_IoBarTable + TableIndex);
*BarOffsetPtr = BaseIoAddr % iSeries_IoMmTable_Entry_Size;
} else
panic("PCI: Invalid PCI IoAddress detected!\n");
}
return DevNode;
}
/************************************************************************/
/* Read MM I/O Instructions for the iSeries */
/* On MM I/O error, all ones are returned and iSeries_pci_IoError is cal*/
/* else, data is returned in big Endian format. */
/************************************************************************/
/* iSeries_Read_Byte = Read Byte ( 8 bit) */
/* iSeries_Read_Word = Read Word (16 bit) */
/* iSeries_Read_Long = Read Long (32 bit) */
/************************************************************************/
u8 iSeries_Read_Byte(void* IoAddress)
/*
* Read MM I/O Instructions for the iSeries
* On MM I/O error, all ones are returned and iSeries_pci_IoError is cal
* else, data is returned in big Endian format.
*
* iSeries_Read_Byte = Read Byte ( 8 bit)
* iSeries_Read_Word = Read Word (16 bit)
* iSeries_Read_Long = Read Long (32 bit)
*/
u8 iSeries_Read_Byte(void *IoAddress)
{
u64 BarOffset;
union HvDsaMap DsaData;
u64 BarOffset;
union HvDsaMap DsaData;
struct HvCallPci_LoadReturn Return;
struct iSeries_Device_Node* DevNode = xlateIoMmAddress(IoAddress,&DsaData,&BarOffset);
struct iSeries_Device_Node *DevNode =
xlateIoMmAddress(IoAddress, &DsaData, &BarOffset);
do {
++Pci_Io_Read_Count;
HvCall3Ret16(HvCallPciBarLoad8, &Return, DsaData.DsaAddr,BarOffset, 0);
} while (CheckReturnCode("RDB",DevNode, Return.rc) != 0);
HvCall3Ret16(HvCallPciBarLoad8, &Return, DsaData.DsaAddr,
BarOffset, 0);
} while (CheckReturnCode("RDB", DevNode, Return.rc) != 0);
if(Pci_Trace_Flag == 1) PCIFR("RDB: IoAddress 0x%p = 0x%02X",IoAddress, (u8)Return.value);
if (Pci_Trace_Flag == 1)
PCIFR("RDB: IoAddress 0x%p = 0x%02X", IoAddress,
(u8)Return.value);
return (u8)Return.value;
}
u16 iSeries_Read_Word(void* IoAddress)
u16 iSeries_Read_Word(void *IoAddress)
{
u64 BarOffset;
union HvDsaMap DsaData;
u64 BarOffset;
union HvDsaMap DsaData;
struct HvCallPci_LoadReturn Return;
struct iSeries_Device_Node* DevNode = xlateIoMmAddress(IoAddress,&DsaData,&BarOffset);
struct iSeries_Device_Node *DevNode =
xlateIoMmAddress(IoAddress, &DsaData, &BarOffset);
do {
++Pci_Io_Read_Count;
HvCall3Ret16(HvCallPciBarLoad16,&Return, DsaData.DsaAddr,BarOffset, 0);
} while (CheckReturnCode("RDW",DevNode, Return.rc) != 0);
HvCall3Ret16(HvCallPciBarLoad16, &Return, DsaData.DsaAddr,
BarOffset, 0);
} while (CheckReturnCode("RDW", DevNode, Return.rc) != 0);
if(Pci_Trace_Flag == 1) PCIFR("RDW: IoAddress 0x%p = 0x%04X",IoAddress, swab16((u16)Return.value));
if (Pci_Trace_Flag == 1)
PCIFR("RDW: IoAddress 0x%p = 0x%04X", IoAddress,
swab16((u16)Return.value));
return swab16((u16)Return.value);
}
u32 iSeries_Read_Long(void* IoAddress)
u32 iSeries_Read_Long(void *IoAddress)
{
u64 BarOffset;
union HvDsaMap DsaData;
u64 BarOffset;
union HvDsaMap DsaData;
struct HvCallPci_LoadReturn Return;
struct iSeries_Device_Node* DevNode = xlateIoMmAddress(IoAddress,&DsaData,&BarOffset);
struct iSeries_Device_Node *DevNode =
xlateIoMmAddress(IoAddress, &DsaData, &BarOffset);
do {
++Pci_Io_Read_Count;
HvCall3Ret16(HvCallPciBarLoad32,&Return, DsaData.DsaAddr,BarOffset, 0);
} while (CheckReturnCode("RDL",DevNode, Return.rc) != 0);
HvCall3Ret16(HvCallPciBarLoad32, &Return, DsaData.DsaAddr,
BarOffset, 0);
} while (CheckReturnCode("RDL", DevNode, Return.rc) != 0);
if(Pci_Trace_Flag == 1) PCIFR("RDL: IoAddress 0x%p = 0x%04X",IoAddress, swab32((u32)Return.value));
if (Pci_Trace_Flag == 1)
PCIFR("RDL: IoAddress 0x%p = 0x%04X", IoAddress,
swab32((u32)Return.value));
return swab32((u32)Return.value);
}
/************************************************************************/
/* Write MM I/O Instructions for the iSeries */
/************************************************************************/
/* iSeries_Write_Byte = Write Byte (8 bit) */
/* iSeries_Write_Word = Write Word(16 bit) */
/* iSeries_Write_Long = Write Long(32 bit) */
/************************************************************************/
void iSeries_Write_Byte(u8 Data, void* IoAddress)
{
u64 BarOffset;
union HvDsaMap DsaData;
/*
* Write MM I/O Instructions for the iSeries
*
* iSeries_Write_Byte = Write Byte (8 bit)
* iSeries_Write_Word = Write Word(16 bit)
* iSeries_Write_Long = Write Long(32 bit)
*/
void iSeries_Write_Byte(u8 Data, void *IoAddress)
{
u64 BarOffset;
union HvDsaMap DsaData;
struct HvCallPci_LoadReturn Return;
struct iSeries_Device_Node* DevNode = xlateIoMmAddress(IoAddress,&DsaData,&BarOffset);
struct iSeries_Device_Node *DevNode =
xlateIoMmAddress(IoAddress, &DsaData, &BarOffset);
do {
++Pci_Io_Write_Count;
Return.rc = HvCall4(HvCallPciBarStore8, DsaData.DsaAddr,BarOffset, Data, 0);
} while (CheckReturnCode("WWB",DevNode, Return.rc) != 0);
if(Pci_Trace_Flag == 1) PCIFR("WWB: IoAddress 0x%p = 0x%02X",IoAddress,Data);
Return.rc = HvCall4(HvCallPciBarStore8, DsaData.DsaAddr,
BarOffset, Data, 0);
} while (CheckReturnCode("WWB", DevNode, Return.rc) != 0);
if (Pci_Trace_Flag == 1)
PCIFR("WWB: IoAddress 0x%p = 0x%02X", IoAddress, Data);
}
void iSeries_Write_Word(u16 Data, void* IoAddress)
void iSeries_Write_Word(u16 Data, void *IoAddress)
{
u64 BarOffset;
union HvDsaMap DsaData;
u64 BarOffset;
union HvDsaMap DsaData;
struct HvCallPci_LoadReturn Return;
struct iSeries_Device_Node* DevNode = xlateIoMmAddress(IoAddress,&DsaData,&BarOffset);
struct iSeries_Device_Node *DevNode =
xlateIoMmAddress(IoAddress, &DsaData, &BarOffset);
do {
++Pci_Io_Write_Count;
Return.rc = HvCall4(HvCallPciBarStore16,DsaData.DsaAddr,BarOffset, swab16(Data), 0);
} while (CheckReturnCode("WWW",DevNode, Return.rc) != 0);
if(Pci_Trace_Flag == 1) PCIFR("WWW: IoAddress 0x%p = 0x%04X",IoAddress,Data);
Return.rc = HvCall4(HvCallPciBarStore16, DsaData.DsaAddr,
BarOffset, swab16(Data), 0);
} while (CheckReturnCode("WWW", DevNode, Return.rc) != 0);
if (Pci_Trace_Flag == 1)
PCIFR("WWW: IoAddress 0x%p = 0x%04X", IoAddress, Data);
}
void iSeries_Write_Long(u32 Data, void* IoAddress)
void iSeries_Write_Long(u32 Data, void *IoAddress)
{
u64 BarOffset;
union HvDsaMap DsaData;
u64 BarOffset;
union HvDsaMap DsaData;
struct HvCallPci_LoadReturn Return;
struct iSeries_Device_Node* DevNode = xlateIoMmAddress(IoAddress,&DsaData,&BarOffset);
struct iSeries_Device_Node *DevNode =
xlateIoMmAddress(IoAddress, &DsaData, &BarOffset);
do {
++Pci_Io_Write_Count;
Return.rc = HvCall4(HvCallPciBarStore32,DsaData.DsaAddr,BarOffset, swab32(Data), 0);
} while (CheckReturnCode("WWL",DevNode, Return.rc) != 0);
if(Pci_Trace_Flag == 1) PCIFR("WWL: IoAddress 0x%p = 0x%08X",IoAddress, Data);
Return.rc = HvCall4(HvCallPciBarStore32, DsaData.DsaAddr,
BarOffset, swab32(Data), 0);
} while (CheckReturnCode("WWL", DevNode, Return.rc) != 0);
if (Pci_Trace_Flag == 1)
PCIFR("WWL: IoAddress 0x%p = 0x%08X", IoAddress, Data);
}
/*
 * pcibios_name_device - arch hook called by the PCI core when a device
 * is discovered.  Intentionally an empty stub on iSeries: no
 * platform-specific renaming of PCI devices is performed here.
 */
void pcibios_name_device(struct pci_dev *dev)
{
}
#define PCIFR(...)
/************************************************************************/
/* File iSeries_pci_reset.c created by Allan Trautman on Mar 21 2001. */
/************************************************************************/
......
......@@ -27,9 +27,8 @@
#include <asm/iSeries/iSeries_proc.h>
#endif
static struct proc_dir_entry * iSeries_proc_root = NULL;
static int iSeries_proc_initializationDone = 0;
static struct proc_dir_entry *iSeries_proc_root;
static int iSeries_proc_initializationDone;
static spinlock_t iSeries_proc_lock;
struct iSeries_proc_registration
......@@ -96,21 +95,22 @@ void iSeries_proc_create(void)
{
unsigned long flags;
struct iSeries_proc_registration *reg = NULL;
spin_lock_irqsave(&iSeries_proc_lock, flags);
printk("iSeries_proc: Creating /proc/iSeries\n");
spin_lock_irqsave(&iSeries_proc_lock, flags);
iSeries_proc_root = proc_mkdir("iSeries", 0);
if (!iSeries_proc_root) return;
if (!iSeries_proc_root)
goto out;
MYQUEUEDEQ(&iSeries_queued, reg);
while (reg != NULL) {
(*(reg->functionMember))(iSeries_proc_root);
MYQUEUEDEQ(&iSeries_queued, reg);
}
iSeries_proc_initializationDone = 1;
out:
spin_unlock_irqrestore(&iSeries_proc_lock, flags);
}
......
......@@ -25,6 +25,8 @@
#include <linux/bootmem.h>
#include <linux/initrd.h>
#include <linux/seq_file.h>
#include <linux/kdev_t.h>
#include <linux/major.h>
#include <linux/root_dev.h>
#include <asm/processor.h>
......@@ -53,34 +55,34 @@
#include <asm/iSeries/mf.h>
/* Function Prototypes */
extern void abort(void);
#ifdef CONFIG_PPC_ISERIES
static void build_iSeries_Memory_Map( void );
static void setup_iSeries_cache_sizes( void );
static void iSeries_bolt_kernel(unsigned long saddr, unsigned long eaddr);
#endif
void build_valid_hpte( unsigned long vsid, unsigned long ea, unsigned long pa,
pte_t * ptep, unsigned hpteflags, unsigned bolted );
extern void ppcdbg_initialize(void);
extern void iSeries_pcibios_init(void);
static void build_iSeries_Memory_Map(void);
static void setup_iSeries_cache_sizes(void);
static void iSeries_bolt_kernel(unsigned long saddr, unsigned long eaddr);
void build_valid_hpte(unsigned long vsid, unsigned long ea, unsigned long pa,
pte_t *ptep, unsigned hpteflags, unsigned bolted);
static void iSeries_setup_dprofile(void);
void iSeries_setup_arch(void);
/* Global Variables */
static unsigned long procFreqHz;
static unsigned long procFreqMhz;
static unsigned long procFreqMhzHundreths;
static unsigned long procFreqHz = 0;
static unsigned long procFreqMhz = 0;
static unsigned long procFreqMhzHundreths = 0;
static unsigned long tbFreqHz;
static unsigned long tbFreqMhz;
static unsigned long tbFreqMhzHundreths;
static unsigned long tbFreqHz = 0;
static unsigned long tbFreqMhz = 0;
static unsigned long tbFreqMhzHundreths = 0;
unsigned long dprof_shift;
unsigned long dprof_len;
unsigned int *dprof_buffer;
unsigned long dprof_shift = 0;
unsigned long dprof_len = 0;
unsigned int * dprof_buffer = NULL;
int piranha_simulator;
int piranha_simulator = 0;
int boot_cpuid;
extern char _end[];
......@@ -92,7 +94,7 @@ extern unsigned long embedded_sysmap_end;
extern unsigned long iSeries_recal_tb;
extern unsigned long iSeries_recal_titan;
static int mf_initialized = 0;
static int mf_initialized;
struct MemoryBlock {
unsigned long absStart;
......@@ -106,30 +108,30 @@ struct MemoryBlock {
* and return the number of physical blocks and fill in the array of
* block data.
*/
unsigned long iSeries_process_Condor_mainstore_vpd( struct MemoryBlock *mb_array, unsigned long max_entries )
unsigned long iSeries_process_Condor_mainstore_vpd(struct MemoryBlock *mb_array,
unsigned long max_entries)
{
/* Determine if absolute memory has any
* holes so that we can interpret the
* access map we get back from the hypervisor
* correctly.
*/
unsigned long holeFirstChunk, holeSizeChunks;
unsigned long numMemoryBlocks = 1;
struct IoHriMainStoreSegment4 * msVpd = (struct IoHriMainStoreSegment4 *)xMsVpd;
struct IoHriMainStoreSegment4 *msVpd =
(struct IoHriMainStoreSegment4 *)xMsVpd;
unsigned long holeStart = msVpd->nonInterleavedBlocksStartAdr;
unsigned long holeEnd = msVpd->nonInterleavedBlocksEndAdr;
unsigned long holeEnd = msVpd->nonInterleavedBlocksEndAdr;
unsigned long holeSize = holeEnd - holeStart;
printk("Mainstore_VPD: Condor\n");
/*
* Determine if absolute memory has any
* holes so that we can interpret the
* access map we get back from the hypervisor
* correctly.
*/
mb_array[0].logicalStart = 0;
mb_array[0].logicalEnd = 0x100000000;
mb_array[0].absStart = 0;
mb_array[0].absEnd = 0x100000000;
mb_array[0].logicalEnd = 0x100000000;
mb_array[0].absStart = 0;
mb_array[0].absEnd = 0x100000000;
if ( holeSize ) {
if (holeSize) {
numMemoryBlocks = 2;
holeStart = holeStart & 0x000fffffffffffff;
holeStart = addr_to_chunk(holeStart);
......@@ -138,275 +140,264 @@ unsigned long iSeries_process_Condor_mainstore_vpd( struct MemoryBlock *mb_array
holeSizeChunks = holeSize;
printk( "Main store hole: start chunk = %0lx, size = %0lx chunks\n",
holeFirstChunk, holeSizeChunks );
mb_array[0].logicalEnd = holeFirstChunk;
mb_array[0].absEnd = holeFirstChunk;
mb_array[0].logicalEnd = holeFirstChunk;
mb_array[0].absEnd = holeFirstChunk;
mb_array[1].logicalStart = holeFirstChunk;
mb_array[1].logicalEnd = 0x100000000 - holeSizeChunks;
mb_array[1].absStart = holeFirstChunk + holeSizeChunks;
mb_array[1].absEnd = 0x100000000;
mb_array[1].logicalEnd = 0x100000000 - holeSizeChunks;
mb_array[1].absStart = holeFirstChunk + holeSizeChunks;
mb_array[1].absEnd = 0x100000000;
}
return numMemoryBlocks;
}
#define MaxSegmentAreas 32
#define MaxSegmentAdrRangeBlocks 128
#define MaxAreaRangeBlocks 4
unsigned long iSeries_process_Regatta_mainstore_vpd( struct MemoryBlock *mb_array, unsigned long max_entries )
#define MaxSegmentAreas 32
#define MaxSegmentAdrRangeBlocks 128
#define MaxAreaRangeBlocks 4
unsigned long iSeries_process_Regatta_mainstore_vpd(
struct MemoryBlock *mb_array, unsigned long max_entries)
{
struct IoHriMainStoreSegment5 * msVpdP = (struct IoHriMainStoreSegment5 *)xMsVpd;
struct IoHriMainStoreSegment5 *msVpdP =
(struct IoHriMainStoreSegment5 *)xMsVpd;
unsigned long numSegmentBlocks = 0;
u32 existsBits = msVpdP->msAreaExists;
unsigned long area_num;
printk("Mainstore_VPD: Regatta\n");
for ( area_num = 0; area_num < MaxSegmentAreas; ++area_num ) {
for (area_num = 0; area_num < MaxSegmentAreas; ++area_num ) {
unsigned long numAreaBlocks;
struct IoHriMainStoreArea4 * currentArea;
struct IoHriMainStoreArea4 *currentArea;
if ( existsBits & 0x80000000 ) {
if (existsBits & 0x80000000) {
unsigned long block_num;
currentArea = &msVpdP->msAreaArray[area_num];
numAreaBlocks = currentArea->numAdrRangeBlocks;
printk("ms_vpd: processing area %2ld blocks=%ld", area_num, numAreaBlocks);
for ( block_num = 0; block_num < numAreaBlocks; ++block_num ) {
printk("ms_vpd: processing area %2ld blocks=%ld",
area_num, numAreaBlocks);
for (block_num = 0; block_num < numAreaBlocks;
++block_num ) {
/* Process an address range block */
struct MemoryBlock tempBlock;
unsigned long i;
tempBlock.absStart = (unsigned long)currentArea->xAdrRangeBlock[block_num].blockStart;
tempBlock.absEnd = (unsigned long)currentArea->xAdrRangeBlock[block_num].blockEnd;
tempBlock.absStart =
(unsigned long)currentArea->xAdrRangeBlock[block_num].blockStart;
tempBlock.absEnd =
(unsigned long)currentArea->xAdrRangeBlock[block_num].blockEnd;
tempBlock.logicalStart = 0;
tempBlock.logicalEnd = 0;
printk("\n block %ld absStart=%016lx absEnd=%016lx",
block_num, tempBlock.absStart,
tempBlock.absEnd);
printk("\n block %ld absStart=%016lx absEnd=%016lx", block_num,
tempBlock.absStart, tempBlock.absEnd);
for ( i=0; i<numSegmentBlocks; ++i ) {
if ( mb_array[i].absStart == tempBlock.absStart )
for (i = 0; i < numSegmentBlocks; ++i) {
if (mb_array[i].absStart ==
tempBlock.absStart)
break;
}
if ( i == numSegmentBlocks ) {
if ( numSegmentBlocks == max_entries ) {
if (i == numSegmentBlocks) {
if (numSegmentBlocks == max_entries)
panic("iSeries_process_mainstore_vpd: too many memory blocks");
}
mb_array[numSegmentBlocks] = tempBlock;
++numSegmentBlocks;
}
else {
} else
printk(" (duplicate)");
}
}
printk("\n");
}
existsBits <<= 1;
}
/* Now sort the blocks found into ascending sequence */
if ( numSegmentBlocks > 1 ) {
if (numSegmentBlocks > 1) {
unsigned long m, n;
for ( m=0; m<numSegmentBlocks-1; ++m ) {
for ( n=numSegmentBlocks-1; m<n; --n ) {
if ( mb_array[n].absStart < mb_array[n-1].absStart ) {
for (m = 0; m < numSegmentBlocks - 1; ++m) {
for (n = numSegmentBlocks - 1; m < n; --n) {
if (mb_array[n].absStart <
mb_array[n-1].absStart) {
struct MemoryBlock tempBlock;
tempBlock = mb_array[n];
mb_array[n] = mb_array[n-1];
mb_array[n-1] = tempBlock;
}
}
}
}
/* Assign "logical" addresses to each block. These
/*
* Assign "logical" addresses to each block. These
* addresses correspond to the hypervisor "bitmap" space.
* Convert all addresses into units of 256K chunks.
*/
{
unsigned long i, nextBitmapAddress;
printk("ms_vpd: %ld sorted memory blocks\n", numSegmentBlocks);
nextBitmapAddress = 0;
for ( i=0; i<numSegmentBlocks; ++i ) {
unsigned long length = mb_array[i].absEnd - mb_array[i].absStart;
for (i = 0; i < numSegmentBlocks; ++i) {
unsigned long length = mb_array[i].absEnd -
mb_array[i].absStart;
mb_array[i].logicalStart = nextBitmapAddress;
mb_array[i].logicalEnd = nextBitmapAddress + length;
nextBitmapAddress += length;
printk(" Bitmap range: %016lx - %016lx\n"
" Absolute range: %016lx - %016lx\n",
mb_array[i].logicalStart, mb_array[i].logicalEnd,
" Absolute range: %016lx - %016lx\n",
mb_array[i].logicalStart,
mb_array[i].logicalEnd,
mb_array[i].absStart, mb_array[i].absEnd);
mb_array[i].absStart = addr_to_chunk( mb_array[i].absStart & 0x000fffffffffffff );
mb_array[i].absEnd = addr_to_chunk( mb_array[i].absEnd & 0x000fffffffffffff );
mb_array[i].logicalStart = addr_to_chunk( mb_array[i].logicalStart );
mb_array[i].logicalEnd = addr_to_chunk( mb_array[i].logicalEnd );
mb_array[i].absStart = addr_to_chunk(mb_array[i].absStart &
0x000fffffffffffff);
mb_array[i].absEnd = addr_to_chunk(mb_array[i].absEnd &
0x000fffffffffffff);
mb_array[i].logicalStart =
addr_to_chunk(mb_array[i].logicalStart);
mb_array[i].logicalEnd = addr_to_chunk(mb_array[i].logicalEnd);
}
}
return numSegmentBlocks;
}
unsigned long iSeries_process_mainstore_vpd( struct MemoryBlock *mb_array, unsigned long max_entries )
unsigned long iSeries_process_mainstore_vpd(struct MemoryBlock *mb_array,
unsigned long max_entries)
{
unsigned long i;
unsigned long mem_blocks = 0;
if (cur_cpu_spec->cpu_features & CPU_FTR_SLB)
mem_blocks = iSeries_process_Regatta_mainstore_vpd( mb_array, max_entries );
mem_blocks = iSeries_process_Regatta_mainstore_vpd(mb_array,
max_entries);
else
mem_blocks = iSeries_process_Condor_mainstore_vpd( mb_array, max_entries );
mem_blocks = iSeries_process_Condor_mainstore_vpd(mb_array,
max_entries);
printk("Mainstore_VPD: numMemoryBlocks = %ld \n", mem_blocks);
for ( i=0; i<mem_blocks; ++i ) {
for (i = 0; i < mem_blocks; ++i) {
printk("Mainstore_VPD: block %3ld logical chunks %016lx - %016lx\n"
" abs chunks %016lx - %016lx\n",
i, mb_array[i].logicalStart, mb_array[i].logicalEnd,
mb_array[i].absStart, mb_array[i].absEnd);
}
return mem_blocks;
}
/*
* void __init iSeries_init_early()
*/
void __init
iSeries_init_early(void)
void __init iSeries_init_early(void)
{
#ifdef CONFIG_PPC_ISERIES
ppcdbg_initialize();
#if defined(CONFIG_BLK_DEV_INITRD)
/*
* If the init RAM disk has been configured and there is
* a non-zero starting address for it, set it up
*/
if ( naca->xRamDisk ) {
if (naca->xRamDisk) {
initrd_start = (unsigned long)__va(naca->xRamDisk);
initrd_end = initrd_start + naca->xRamDiskSize * PAGE_SIZE;
initrd_end = initrd_start + naca->xRamDiskSize * PAGE_SIZE;
initrd_below_start_ok = 1; // ramdisk in kernel space
ROOT_DEV = Root_RAM0;
if ( ((rd_size*1024)/PAGE_SIZE) < naca->xRamDiskSize )
rd_size = (naca->xRamDiskSize*PAGE_SIZE)/1024;
if (((rd_size * 1024) / PAGE_SIZE) < naca->xRamDiskSize)
rd_size = (naca->xRamDiskSize * PAGE_SIZE) / 1024;
} else
#endif /* CONFIG_BLK_DEV_INITRD */
{
/* ROOT_DEV = MKDEV( VIODASD_MAJOR, 1 ); */
}
{
/* ROOT_DEV = MKDEV(VIODASD_MAJOR, 1); */
}
iSeries_recal_tb = get_tb();
iSeries_recal_titan = HvCallXm_loadTod();
ppc_md.setup_arch = iSeries_setup_arch;
ppc_md.setup_residual = iSeries_setup_residual;
ppc_md.get_cpuinfo = iSeries_get_cpuinfo;
ppc_md.init_IRQ = iSeries_init_IRQ;
ppc_md.get_irq = iSeries_get_irq;
ppc_md.init = NULL;
ppc_md.setup_arch = iSeries_setup_arch;
ppc_md.setup_residual = iSeries_setup_residual;
ppc_md.get_cpuinfo = iSeries_get_cpuinfo;
ppc_md.init_IRQ = iSeries_init_IRQ;
ppc_md.init_irq_desc = iSeries_init_irq_desc;
ppc_md.get_irq = iSeries_get_irq;
ppc_md.init = NULL;
ppc_md.restart = iSeries_restart;
ppc_md.power_off = iSeries_power_off;
ppc_md.halt = iSeries_halt;
ppc_md.restart = iSeries_restart;
ppc_md.power_off = iSeries_power_off;
ppc_md.halt = iSeries_halt;
ppc_md.get_boot_time = iSeries_get_boot_time;
ppc_md.set_rtc_time = iSeries_set_rtc_time;
ppc_md.get_rtc_time = iSeries_get_rtc_time;
ppc_md.calibrate_decr = iSeries_calibrate_decr;
ppc_md.progress = iSeries_progress;
ppc_md.get_boot_time = iSeries_get_boot_time;
ppc_md.set_rtc_time = iSeries_set_rtc_time;
ppc_md.get_rtc_time = iSeries_get_rtc_time;
ppc_md.calibrate_decr = iSeries_calibrate_decr;
ppc_md.progress = iSeries_progress;
hpte_init_iSeries();
tce_init_iSeries();
/* Initialize the table which translate Linux physical addresses to
/*
* Initialize the table which translate Linux physical addresses to
* AS/400 absolute addresses
*/
build_iSeries_Memory_Map();
setup_iSeries_cache_sizes();
/* Initialize machine-dependency vectors */
#ifdef CONFIG_SMP
smp_init_iSeries();
#endif
if ( itLpNaca.xPirEnvironMode == 0 )
if (itLpNaca.xPirEnvironMode == 0)
piranha_simulator = 1;
#endif
}
/*
* void __init iSeries_init()
*/
void __init
iSeries_init(unsigned long r3, unsigned long r4, unsigned long r5,
void __init iSeries_init(unsigned long r3, unsigned long r4, unsigned long r5,
unsigned long r6, unsigned long r7)
{
/* Associate Lp Event Queue 0 with processor 0 */
HvCallEvent_setLpEventQueueInterruptProc( 0, 0 );
char *p, *q;
{
/* copy the command line parameter from the primary VSP */
char *p, *q;
HvCallEvent_dmaToSp( cmd_line,
2*64*1024,
256,
HvLpDma_Direction_RemoteToLocal );
p = q = cmd_line + 255;
while( p > cmd_line ) {
if ((*p == 0) || (*p == ' ') || (*p == '\n'))
--p;
else
break;
}
if ( p < q )
*(p+1) = 0;
/* Associate Lp Event Queue 0 with processor 0 */
HvCallEvent_setLpEventQueueInterruptProc(0, 0);
/* copy the command line parameter from the primary VSP */
HvCallEvent_dmaToSp(cmd_line, 2 * 64* 1024, 256,
HvLpDma_Direction_RemoteToLocal);
p = q = cmd_line + 255;
while (p > cmd_line) {
if ((*p == 0) || (*p == ' ') || (*p == '\n'))
--p;
else
break;
}
if (p < q)
*(p + 1) = 0;
if (strstr(cmd_line, "dprofile=")) {
char *p, *q;
for (q = cmd_line; (p = strstr(q, "dprofile=")) != 0; ) {
unsigned long size, new_klimit;
q = p + 9;
if (p > cmd_line && p[-1] != ' ')
if ((p > cmd_line) && (p[-1] != ' '))
continue;
dprof_shift = simple_strtoul(q, &q, 0);
dprof_len = (unsigned long)_etext - (unsigned long)_stext;
dprof_len = (unsigned long)_etext -
(unsigned long)_stext;
dprof_len >>= dprof_shift;
size = ((dprof_len * sizeof(unsigned int)) + (PAGE_SIZE-1)) & PAGE_MASK;
dprof_buffer = (unsigned int *)((klimit + (PAGE_SIZE-1)) & PAGE_MASK);
size = ((dprof_len * sizeof(unsigned int)) +
(PAGE_SIZE-1)) & PAGE_MASK;
dprof_buffer = (unsigned int *)((klimit +
(PAGE_SIZE-1)) & PAGE_MASK);
new_klimit = ((unsigned long)dprof_buffer) + size;
lmb_reserve( __pa(klimit), (new_klimit-klimit));
lmb_reserve(__pa(klimit), (new_klimit-klimit));
klimit = new_klimit;
memset( dprof_buffer, 0, size );
memset(dprof_buffer, 0, size);
}
}
iSeries_setup_dprofile();
iSeries_proc_early_init();
iSeries_proc_early_init();
mf_init();
mf_initialized = 1;
mb();
iSeries_proc_callback( &pmc_proc_init );
iSeries_proc_callback(&pmc_proc_init);
}
#ifdef CONFIG_PPC_ISERIES
/*
* The iSeries may have very large memories ( > 128 GB ) and a partition
* may get memory in "chunks" that may be anywhere in the 2**52 real
......@@ -444,9 +435,10 @@ static void __init build_iSeries_Memory_Map(void)
/* Chunk size on iSeries is 256K bytes */
totalChunks = (u32)HvLpConfig_getMsChunks();
klimit = msChunks_alloc(klimit, totalChunks, 1UL<<18);
klimit = msChunks_alloc(klimit, totalChunks, 1UL << 18);
/* Get absolute address of our load area
/*
* Get absolute address of our load area
* and map it to physical address 0
* This guarantees that the loadarea ends up at physical 0
* otherwise, it might not be returned by PLIC as the first
......@@ -456,63 +448,68 @@ static void __init build_iSeries_Memory_Map(void)
loadAreaFirstChunk = (u32)addr_to_chunk(itLpNaca.xLoadAreaAddr);
loadAreaSize = itLpNaca.xLoadAreaChunks;
/* Only add the pages already mapped here.
/*
* Only add the pages already mapped here.
* Otherwise we might add the hpt pages
* The rest of the pages of the load area
* aren't in the HPT yet and can still
* be assigned an arbitrary physical address
*/
if ( (loadAreaSize * 64) > HvPagesToMap )
if ((loadAreaSize * 64) > HvPagesToMap)
loadAreaSize = HvPagesToMap / 64;
loadAreaLastChunk = loadAreaFirstChunk + loadAreaSize - 1;
/* TODO Do we need to do something if the HPT is in the 64MB load area?
/*
* TODO Do we need to do something if the HPT is in the 64MB load area?
* This would be required if the itLpNaca.xLoadAreaChunks includes
* the HPT size
*/
printk( "Mapping load area - physical addr = 0000000000000000\n"
" absolute addr = %016lx\n",
chunk_to_addr(loadAreaFirstChunk) );
printk( "Load area size %dK\n", loadAreaSize*256 );
printk("Mapping load area - physical addr = 0000000000000000\n"
" absolute addr = %016lx\n",
chunk_to_addr(loadAreaFirstChunk));
printk("Load area size %dK\n", loadAreaSize * 256);
for ( nextPhysChunk = 0;
nextPhysChunk < loadAreaSize;
++nextPhysChunk ) {
msChunks.abs[nextPhysChunk] = loadAreaFirstChunk+nextPhysChunk;
}
for (nextPhysChunk = 0; nextPhysChunk < loadAreaSize; ++nextPhysChunk)
msChunks.abs[nextPhysChunk] =
loadAreaFirstChunk + nextPhysChunk;
/* Get absolute address of our HPT and remember it so
/*
* Get absolute address of our HPT and remember it so
* we won't map it to any physical address
*/
hptFirstChunk = (u32)addr_to_chunk(HvCallHpt_getHptAddress());
hptSizePages = (u32)(HvCallHpt_getHptPages());
hptSizeChunks = hptSizePages >> (msChunks.chunk_shift-PAGE_SHIFT);
hptSizePages = (u32)HvCallHpt_getHptPages();
hptSizeChunks = hptSizePages >> (msChunks.chunk_shift - PAGE_SHIFT);
hptLastChunk = hptFirstChunk + hptSizeChunks - 1;
printk( "HPT absolute addr = %016lx, size = %dK\n",
chunk_to_addr(hptFirstChunk), hptSizeChunks*256 );
printk("HPT absolute addr = %016lx, size = %dK\n",
chunk_to_addr(hptFirstChunk), hptSizeChunks * 256);
/* Fill in the htab_data structure */
/* Fill in size of hashed page table */
num_ptegs = hptSizePages * (PAGE_SIZE/(sizeof(HPTE)*HPTES_PER_GROUP));
num_ptegs = hptSizePages *
(PAGE_SIZE / (sizeof(HPTE) * HPTES_PER_GROUP));
htab_data.htab_num_ptegs = num_ptegs;
htab_data.htab_hash_mask = num_ptegs - 1;
/* The actual hashed page table is in the hypervisor, we have no direct access */
/*
* The actual hashed page table is in the hypervisor,
* we have no direct access
*/
htab_data.htab = NULL;
/* Determine if absolute memory has any
/*
* Determine if absolute memory has any
* holes so that we can interpret the
* access map we get back from the hypervisor
* correctly.
*/
numMemoryBlocks = iSeries_process_mainstore_vpd( mb, 32 );
numMemoryBlocks = iSeries_process_mainstore_vpd(mb, 32);
/* Process the main store access map from the hypervisor
/*
* Process the main store access map from the hypervisor
* to build up our physical -> absolute translation table
*/
curBlock = 0;
......@@ -520,30 +517,29 @@ static void __init build_iSeries_Memory_Map(void)
currDword = 0;
moreChunks = totalChunks;
while ( moreChunks ) {
map = HvCallSm_get64BitsOfAccessMap( itLpNaca.xLpIndex,
currDword );
while (moreChunks) {
map = HvCallSm_get64BitsOfAccessMap(itLpNaca.xLpIndex,
currDword);
thisChunk = currChunk;
while ( map ) {
while (map) {
chunkBit = map >> 63;
map <<= 1;
if ( chunkBit ) {
if (chunkBit) {
--moreChunks;
while ( thisChunk >= mb[curBlock].logicalEnd ) {
while (thisChunk >= mb[curBlock].logicalEnd) {
++curBlock;
if ( curBlock >= numMemoryBlocks )
if (curBlock >= numMemoryBlocks)
panic("out of memory blocks");
}
if ( thisChunk < mb[curBlock].logicalStart )
if (thisChunk < mb[curBlock].logicalStart)
panic("memory block error");
absChunk = mb[curBlock].absStart + ( thisChunk - mb[curBlock].logicalStart );
if ( ( ( absChunk < hptFirstChunk ) ||
( absChunk > hptLastChunk ) ) &&
( ( absChunk < loadAreaFirstChunk ) ||
( absChunk > loadAreaLastChunk ) ) ) {
absChunk = mb[curBlock].absStart +
(thisChunk - mb[curBlock].logicalStart);
if (((absChunk < hptFirstChunk) ||
(absChunk > hptLastChunk)) &&
((absChunk < loadAreaFirstChunk) ||
(absChunk > loadAreaLastChunk))) {
msChunks.abs[nextPhysChunk] = absChunk;
++nextPhysChunk;
}
......@@ -553,8 +549,9 @@ static void __init build_iSeries_Memory_Map(void)
++currDword;
currChunk += 64;
}
/* main store size (in chunks) is
/*
* main store size (in chunks) is
* totalChunks - hptSizeChunks
* which should be equal to
* nextPhysChunk
......@@ -562,12 +559,12 @@ static void __init build_iSeries_Memory_Map(void)
systemcfg->physicalMemorySize = chunk_to_addr(nextPhysChunk);
/* Bolt kernel mappings for all of memory */
iSeries_bolt_kernel( 0, systemcfg->physicalMemorySize );
iSeries_bolt_kernel(0, systemcfg->physicalMemorySize);
lmb_init();
lmb_add( 0, systemcfg->physicalMemorySize );
lmb_add(0, systemcfg->physicalMemorySize);
lmb_analyze(); /* ?? */
lmb_reserve( 0, __pa(klimit));
lmb_reserve(0, __pa(klimit));
/*
* Hardcode to GP size. I am not sure where to get this info. DRENG
......@@ -579,59 +576,94 @@ static void __init build_iSeries_Memory_Map(void)
* Set up the variables that describe the cache line sizes
* for this machine.
*/
static void __init setup_iSeries_cache_sizes(void)
{
unsigned int i, n;
unsigned int procIx = get_paca()->xLpPaca.xDynHvPhysicalProcIndex;
systemcfg->iCacheL1Size = xIoHriProcessorVpd[procIx].xInstCacheSize * 1024;
systemcfg->iCacheL1LineSize = xIoHriProcessorVpd[procIx].xInstCacheOperandSize;
systemcfg->dCacheL1Size = xIoHriProcessorVpd[procIx].xDataL1CacheSizeKB * 1024;
systemcfg->dCacheL1LineSize = xIoHriProcessorVpd[procIx].xDataCacheOperandSize;
systemcfg->iCacheL1Size =
xIoHriProcessorVpd[procIx].xInstCacheSize * 1024;
systemcfg->iCacheL1LineSize =
xIoHriProcessorVpd[procIx].xInstCacheOperandSize;
systemcfg->dCacheL1Size =
xIoHriProcessorVpd[procIx].xDataL1CacheSizeKB * 1024;
systemcfg->dCacheL1LineSize =
xIoHriProcessorVpd[procIx].xDataCacheOperandSize;
naca->iCacheL1LinesPerPage = PAGE_SIZE / systemcfg->iCacheL1LineSize;
naca->dCacheL1LinesPerPage = PAGE_SIZE / systemcfg->dCacheL1LineSize;
i = systemcfg->iCacheL1LineSize;
n = 0;
while ((i=(i/2))) ++n;
while ((i = (i / 2)))
++n;
naca->iCacheL1LogLineSize = n;
i = systemcfg->dCacheL1LineSize;
n = 0;
while ((i=(i/2))) ++n;
while ((i = (i / 2)))
++n;
naca->dCacheL1LogLineSize = n;
printk( "D-cache line size = %d\n", (unsigned int)systemcfg->dCacheL1LineSize);
printk( "I-cache line size = %d\n", (unsigned int)systemcfg->iCacheL1LineSize);
printk("D-cache line size = %d\n",
(unsigned int)systemcfg->dCacheL1LineSize);
printk("I-cache line size = %d\n",
(unsigned int)systemcfg->iCacheL1LineSize);
}
/*
* Bolt the kernel addr space into the HPT
* Create a pte. Used during initialization only.
*/
/*
 * iSeries_make_pte - create and install a bolted hashed-page-table
 * entry for the given virtual -> physical mapping via the hypervisor.
 * Used during initialization only.
 *
 * va:   virtual address to map
 * pa:   physical (absolute) address to map to; OR'ed with mode bits
 * mode: PTE protection/mode bits (e.g. _PAGE_ACCESSED | PP_RWXX)
 *
 * Panics if HvCallHpt_findValid() reports that no free slot exists in
 * the primary group for this vpn.
 *
 * Cleanup vs. original: the local `hash = hpt_hash(vpn, 0)` was a dead
 * store (hpt_hash is a pure computation and `hash` was never read), so
 * it has been removed.
 */
static void iSeries_make_pte(unsigned long va, unsigned long pa,
			     int mode)
{
	HPTE local_hpte, rhpte;
	unsigned long vpn;
	long slot;

	vpn = va >> PAGE_SHIFT;

	/* Build the new entry: valid, bolted, with the AVPN taken from va */
	local_hpte.dw1.dword1 = pa | mode;
	local_hpte.dw0.dword0 = 0;
	local_hpte.dw0.dw0.avpn = va >> 23;
	local_hpte.dw0.dw0.bolted = 1;	/* bolted */
	local_hpte.dw0.dw0.v = 1;

	/* Ask the hypervisor for a free slot in the primary group */
	slot = HvCallHpt_findValid(&rhpte, vpn);
	if (slot < 0) {
		/* Must find space in primary group */
		panic("hash_page: hpte already exists\n");
	}
	HvCallHpt_addValidate(slot, 0, (HPTE *)&local_hpte);
}
/*
 * Bolt the kernel address range [saddr, eaddr) into the HPT, one page
 * at a time.  If the hypervisor already holds an HPTE for a page we
 * just mark it bolted and fix its protection bits; otherwise a new
 * bolted entry is created.
 *
 * NOTE(review): this span contained interleaved pre-/post-patch diff
 * lines (duplicate declarations, both iSeries_make_pte and
 * build_valid_hpte calls); de-interleaved to the post-patch version.
 */
static void __init iSeries_bolt_kernel(unsigned long saddr, unsigned long eaddr)
{
	unsigned long pa;
	unsigned long mode_rw = _PAGE_ACCESSED | _PAGE_COHERENT | PP_RWXX;
	HPTE hpte;

	for (pa = saddr; pa < eaddr; pa += PAGE_SIZE) {
		unsigned long ea = (unsigned long)__va(pa);
		unsigned long vsid = get_kernel_vsid(ea);
		unsigned long va = (vsid << 28) | (pa & 0xfffffff);
		unsigned long vpn = va >> PAGE_SHIFT;
		unsigned long slot = HvCallHpt_findValid(&hpte, vpn);

		if (hpte.dw0.dw0.v) {
			/* HPTE exists, so just bolt it */
			HvCallHpt_setSwBits(slot, 0x10, 0);
			/* And make sure the pp bits are correct */
			HvCallHpt_setPp(slot, PP_RWXX);
		} else
			/* No HPTE exists, so create a new bolted one */
			build_valid_hpte(vsid, ea, pa, NULL, mode_rw, 1);
	}
}
#endif /* CONFIG_PPC_ISERIES */
extern unsigned long ppc_proc_freq;
extern unsigned long ppc_tb_freq;
......@@ -639,10 +671,9 @@ extern unsigned long ppc_tb_freq;
/*
* Document me.
*/
void __init
iSeries_setup_arch(void)
void __init iSeries_setup_arch(void)
{
void * eventStack;
void *eventStack;
unsigned procIx = get_paca()->xLpPaca.xDynHvPhysicalProcIndex;
/* Add an eye catcher and the systemcfg layout version number */
......@@ -657,50 +688,43 @@ iSeries_setup_arch(void)
* we subtract out the KERNELBASE and add in the
* absolute real address of the kernel load area
*/
eventStack = alloc_bootmem_pages( LpEventStackSize );
memset( eventStack, 0, LpEventStackSize );
eventStack = alloc_bootmem_pages(LpEventStackSize);
memset(eventStack, 0, LpEventStackSize);
/* Invoke the hypervisor to initialize the event stack */
HvCallEvent_setLpEventStack( 0, eventStack, LpEventStackSize );
HvCallEvent_setLpEventStack(0, eventStack, LpEventStackSize);
/* Initialize fields in our Lp Event Queue */
xItLpQueue.xSlicEventStackPtr = (char *)eventStack;
xItLpQueue.xSlicCurEventPtr = (char *)eventStack;
xItLpQueue.xSlicLastValidEventPtr = (char *)eventStack +
(LpEventStackSize - LpEventMaxSize);
xItLpQueue.xIndex = 0;
/* Compute processor frequency */
procFreqHz = (((1UL<<34) * 1000000) / xIoHriProcessorVpd[procIx].xProcFreq );
procFreqHz = ((1UL << 34) * 1000000) /
xIoHriProcessorVpd[procIx].xProcFreq;
procFreqMhz = procFreqHz / 1000000;
procFreqMhzHundreths = (procFreqHz/10000) - (procFreqMhz*100);
procFreqMhzHundreths = (procFreqHz / 10000) - (procFreqMhz * 100);
ppc_proc_freq = procFreqHz;
/* Compute time base frequency */
tbFreqHz = (((1UL<<32) * 1000000) / xIoHriProcessorVpd[procIx].xTimeBaseFreq );
tbFreqHz = ((1UL << 32) * 1000000) /
xIoHriProcessorVpd[procIx].xTimeBaseFreq;
tbFreqMhz = tbFreqHz / 1000000;
tbFreqMhzHundreths = (tbFreqHz/10000) - (tbFreqMhz*100);
tbFreqMhzHundreths = (tbFreqHz / 10000) - (tbFreqMhz * 100);
ppc_tb_freq = tbFreqHz;
printk("Max logical processors = %d\n",
itVpdAreas.xSlicMaxLogicalProcs );
itVpdAreas.xSlicMaxLogicalProcs);
printk("Max physical processors = %d\n",
itVpdAreas.xSlicMaxPhysicalProcs );
printk("Processor frequency = %lu.%02lu\n",
procFreqMhz,
procFreqMhzHundreths );
printk("Time base frequency = %lu.%02lu\n",
tbFreqMhz,
tbFreqMhzHundreths );
itVpdAreas.xSlicMaxPhysicalProcs);
printk("Processor frequency = %lu.%02lu\n", procFreqMhz,
procFreqMhzHundreths);
printk("Time base frequency = %lu.%02lu\n", tbFreqMhz,
tbFreqMhzHundreths);
systemcfg->processor = xIoHriProcessorVpd[procIx].xPVR;
printk("Processor version = %x\n", systemcfg->processor);
}
/*
......@@ -715,38 +739,27 @@ iSeries_setup_arch(void)
*
* Output(s):
* *buffer - Buffer with CPU data.
*
* Returns:
* The number of bytes copied into 'buffer' if OK, otherwise zero or less
* on error.
*/
void iSeries_setup_residual(struct seq_file *m)
void iSeries_setup_residual(struct seq_file *m, int cpu_id)
{
seq_printf(m,"clock\t\t: %lu.%02luMhz\n",
procFreqMhz, procFreqMhzHundreths );
seq_printf(m,"time base\t: %lu.%02luMHz\n",
tbFreqMhz, tbFreqMhzHundreths );
seq_printf(m,"i-cache\t\t: %d\n",
systemcfg->iCacheL1LineSize);
seq_printf(m,"d-cache\t\t: %d\n",
systemcfg->dCacheL1LineSize);
seq_printf(m, "clock\t\t: %lu.%02luMhz\n", procFreqMhz,
procFreqMhzHundreths);
seq_printf(m, "time base\t: %lu.%02luMHz\n", tbFreqMhz,
tbFreqMhzHundreths);
seq_printf(m, "i-cache\t\t: %d\n", systemcfg->iCacheL1LineSize);
seq_printf(m, "d-cache\t\t: %d\n", systemcfg->dCacheL1LineSize);
}
/* Emit the "machine" identification line for /proc/cpuinfo. */
void iSeries_get_cpuinfo(struct seq_file *m)
{
	/* Single print: the diff interleave had this line duplicated. */
	seq_printf(m, "machine\t\t: 64-bit iSeries Logical Partition\n");
}
/*
* Document me.
* and Implement me.
*/
int
iSeries_get_irq(struct pt_regs *regs)
int iSeries_get_irq(struct pt_regs *regs)
{
/* -2 means ignore this interrupt */
return -2;
......@@ -755,8 +768,7 @@ iSeries_get_irq(struct pt_regs *regs)
/*
 * Restart (reboot) the partition by asking the hypervisor's machine
 * facilities to reboot us.  The command string is accepted for
 * interface compatibility but is not used.
 */
void iSeries_restart(char *cmd)
{
	mf_reboot();
}
......@@ -764,8 +776,7 @@ iSeries_restart(char *cmd)
/*
 * Power the partition off via the machine facilities interface.
 */
void iSeries_power_off(void)
{
	mf_powerOff();
}
......@@ -773,8 +784,7 @@ iSeries_power_off(void)
/*
 * Halt the partition.  There is no distinct halt primitive in the
 * machine facilities interface, so this powers the partition off,
 * the same as iSeries_power_off().
 */
void iSeries_halt(void)
{
	mf_powerOff();
}
......@@ -792,24 +802,19 @@ extern void setup_default_decr(void);
* and sets up the kernel timer decrementer based on that value.
*
*/
void __init
iSeries_calibrate_decr(void)
void __init iSeries_calibrate_decr(void)
{
unsigned long cyclesPerUsec;
struct div_result divres;
/* Compute decrementer (and TB) frequency
* in cycles/sec
*/
/* Compute decrementer (and TB) frequency in cycles/sec */
cyclesPerUsec = ppc_tb_freq / 1000000;
cyclesPerUsec = ppc_tb_freq / 1000000; /* cycles / usec */
/* Set the amount to refresh the decrementer by. This
/*
* Set the amount to refresh the decrementer by. This
* is the number of decrementer ticks it takes for
* 1/HZ seconds.
*/
tb_ticks_per_jiffy = ppc_tb_freq / HZ;
#if 0
......@@ -824,47 +829,54 @@ iSeries_calibrate_decr(void)
* that jiffies (and xtime) will match the time returned
* by do_gettimeofday.
*/
tb_ticks_per_sec = tb_ticks_per_jiffy * HZ;
tb_ticks_per_sec = tb_ticks_per_jiffy * HZ;
tb_ticks_per_usec = cyclesPerUsec;
tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000);
div128_by_32( 1024*1024, 0, tb_ticks_per_sec, &divres );
div128_by_32(1024 * 1024, 0, tb_ticks_per_sec, &divres);
tb_to_xs = divres.result_low;
setup_default_decr();
}
/*
 * Report a boot-progress code: always log it, and mirror it to the
 * service processor display once machine facilities are initialized
 * and we are not running under the Piranha simulator.  Code 0xffff
 * means "done" and clears the display instead.
 * (De-interleaved diff: the whole body was duplicated old+new.)
 */
void __init iSeries_progress(char *st, unsigned short code)
{
	printk("Progress: [%04x] - %s\n", (unsigned)code, st);
	if (!piranha_simulator && mf_initialized) {
		if (code != 0xffff)
			mf_displayProgress(code);
		else
			mf_clearSrc();
	}
}
/*
 * Adjust klimit (the end of the kernel image) to account for data
 * appended after the kernel proper.
 */
void iSeries_fixup_klimit(void)
{
	/*
	 * Change klimit to take into account any ram disk that may be
	 * included after the kernel.
	 */
	if (naca->xRamDisk)
		klimit = KERNELBASE + (u64)naca->xRamDisk +
			(naca->xRamDiskSize * PAGE_SIZE);
	else {
		/*
		 * No ram disk was included - check and see if there
		 * was an embedded system map; round its end up to the
		 * next 4K boundary.
		 */
		if (embedded_sysmap_end)
			klimit = KERNELBASE + ((embedded_sysmap_end + 4095) &
					0xfffffffffffff000);
	}
}
static void iSeries_setup_dprofile(void)
{
if ( dprof_buffer ) {
if (dprof_buffer) {
unsigned i;
for (i=0; i<NR_CPUS; ++i) {
for (i = 0; i < NR_CPUS; ++i) {
paca[i].prof_shift = dprof_shift;
paca[i].prof_len = dprof_len-1;
paca[i].prof_len = dprof_len - 1;
paca[i].prof_buffer = dprof_buffer;
paca[i].prof_stext = (unsigned *)_stext;
mb();
......
......@@ -19,25 +19,24 @@
#ifndef __ISERIES_SETUP_H__
#define __ISERIES_SETUP_H__

/*
 * Entry points of the iSeries platform setup code.
 * De-interleaved to the post-patch prototypes: the span contained
 * both the old and new declarations, including CONFLICTING
 * signatures for iSeries_setup_residual, iSeries_set_rtc_time and
 * iSeries_get_rtc_time; the struct rtc_time / cpu_id forms below are
 * the ones the patch introduces.
 */
extern void iSeries_init_early(void);
extern void iSeries_init(unsigned long r3, unsigned long ird_start,
		unsigned long ird_end, unsigned long cline_start,
		unsigned long cline_end);
extern void iSeries_setup_arch(void);
extern void iSeries_setup_residual(struct seq_file *m, int cpu_id);
extern void iSeries_get_cpuinfo(struct seq_file *m);
extern void iSeries_init_IRQ(void);
extern void iSeries_init_irq_desc(irq_desc_t *);
extern int iSeries_get_irq(struct pt_regs *regs);
extern void iSeries_restart(char *cmd);
extern void iSeries_power_off(void);
extern void iSeries_halt(void);
extern void iSeries_time_init(void);
extern void iSeries_get_boot_time(struct rtc_time *tm);
extern int iSeries_set_rtc_time(struct rtc_time *tm);
extern void iSeries_get_rtc_time(struct rtc_time *tm);
extern void iSeries_calibrate_decr(void);
extern void iSeries_progress(char *, unsigned short);

#endif /* __ISERIES_SETUP_H__ */
......@@ -70,7 +70,7 @@ static void yield_shared_processor(void)
lpaca->next_jiffy_update_tb);
lpaca->yielded = 0; /* Back to IPI's */
locale_irq_enable();
local_irq_enable();
/*
* The decrementer stops during the yield. Force a fake
......@@ -89,16 +89,14 @@ int iSeries_idle(void)
long oldval;
unsigned long CTRL;
/* endless loop with no priority at all */
current->nice = 20;
current->counter = -100;
/* ensure iSeries run light will be out when idle */
current->thread.flags &= ~PPC_FLAG_RUN_LIGHT;
clear_thread_flag(TIF_RUN_LIGHT);
CTRL = mfspr(CTRLF);
CTRL &= ~RUNLATCH;
mtspr(CTRLT, CTRL);
#if 0
init_idle();
#endif
lpaca = get_paca();
......@@ -106,26 +104,29 @@ int iSeries_idle(void)
if (lpaca->xLpPaca.xSharedProc) {
if (ItLpQueue_isLpIntPending(lpaca->lpQueuePtr))
process_iSeries_events();
if (!current->need_resched)
if (!need_resched())
yield_shared_processor();
} else {
/* Avoid an IPI by setting need_resched */
oldval = xchg(&current->need_resched, -1);
oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED);
if (!oldval) {
while(current->need_resched == -1) {
set_thread_flag(TIF_POLLING_NRFLAG);
while (!need_resched()) {
HMT_medium();
if (ItLpQueue_isLpIntPending(lpaca->lpQueuePtr))
process_iSeries_events();
HMT_low();
}
HMT_medium();
clear_thread_flag(TIF_POLLING_NRFLAG);
} else {
set_need_resched();
}
}
HMT_medium();
if (current->need_resched) {
lpaca->xLpPaca.xIdle = 0;
schedule();
check_pgt_cache();
}
schedule();
}
return 0;
}
......@@ -158,10 +159,11 @@ int default_idle(void)
return 0;
}
#ifdef CONFIG_PPC_PSERIES
int dedicated_idle(void)
{
long oldval;
struct paca_struct *lpaca = get_paca(), *ppaca;;
struct paca_struct *lpaca = get_paca(), *ppaca;
unsigned long start_snooze;
ppaca = &paca[(lpaca->xPacaIndex) ^ 1];
......@@ -274,6 +276,7 @@ int shared_idle(void)
return 0;
}
#endif
int cpu_idle(void)
{
......
......@@ -59,7 +59,6 @@
extern void iSeries_smp_message_recv( struct pt_regs * );
#endif
volatile unsigned char *chrp_int_ack_special;
static void register_irq_proc (unsigned int irq);
irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned = {
......@@ -561,17 +560,14 @@ void ppc_irq_dispatch_handler(struct pt_regs *regs, int irq)
spin_unlock(&desc->lock);
}
#ifdef CONFIG_PPC_ISERIES
int do_IRQ(struct pt_regs *regs)
{
int irq, first = 1;
#ifdef CONFIG_PPC_ISERIES
struct paca_struct *lpaca;
struct ItLpQueue *lpq;
#endif
irq_enter();
#ifdef CONFIG_PPC_ISERIES
lpaca = get_paca();
#ifdef CONFIG_SMP
if (lpaca->xLpPaca.xIntDword.xFields.xIpiCnt) {
......@@ -582,7 +578,24 @@ int do_IRQ(struct pt_regs *regs)
lpq = lpaca->lpQueuePtr;
if (lpq && ItLpQueue_isLpIntPending(lpq))
lpEvent_count += ItLpQueue_process(lpq, regs);
#else
irq_exit();
if (lpaca->xLpPaca.xIntDword.xFields.xDecrInt) {
lpaca->xLpPaca.xIntDword.xFields.xDecrInt = 0;
/* Signal a fake decrementer interrupt */
timer_interrupt(regs);
}
return 1; /* lets ret_from_int know we can do checks */
}
#else /* CONFIG_PPC_ISERIES */
int do_IRQ(struct pt_regs *regs)
{
int irq, first = 1;
irq_enter();
/*
* Every arch is required to implement ppc_md.get_irq.
* This function will either return an irq number or -1 to
......@@ -598,20 +611,12 @@ int do_IRQ(struct pt_regs *regs)
if (irq != -2 && first)
/* That's not SMP safe ... but who cares ? */
ppc_spurious_interrupts++;
#endif
irq_exit();
#ifdef CONFIG_PPC_ISERIES
if (lpaca->xLpPaca.xIntDword.xFields.xDecrInt) {
lpaca->xLpPaca.xIntDword.xFields.xDecrInt = 0;
/* Signal a fake decrementer interrupt */
timer_interrupt(regs);
}
#endif
return 1; /* lets ret_from_int know we can do checks */
}
#endif /* CONFIG_PPC_ISERIES */
unsigned long probe_irq_on (void)
{
......@@ -636,10 +641,10 @@ void __init init_IRQ(void)
{
static int once = 0;
if ( once )
if (once)
return;
else
once++;
once++;
ppc_md.init_IRQ();
}
......
......@@ -42,160 +42,120 @@
#include <linux/pci.h>
#include <linux/bcd.h>
/* Defined elsewhere in the iSeries code; declaration only (was duplicated). */
extern struct pci_dev *iSeries_vio_dev;
/*
* This is the structure layout for the Machine Facilites LPAR event
* flows.
*/
struct VspCmdData;
struct CeMsgData;
union SafeCast
{
u64 ptrAsU64;
union safe_cast {
u64 ptr_as_u64;
void *ptr;
};
struct VspCmdData {
union safe_cast token;
u16 cmd;
HvLpIndex lp_index;
u8 result_code;
u32 reserved;
union {
u64 state; /* GetStateOut */
u64 ipl_type; /* GetIplTypeOut, Function02SelectIplTypeIn */
u64 ipl_mode; /* GetIplModeOut, Function02SelectIplModeIn */
u64 page[4]; /* GetSrcHistoryIn */
u64 flag; /* GetAutoIplWhenPrimaryIplsOut,
SetAutoIplWhenPrimaryIplsIn,
WhiteButtonPowerOffIn,
Function08FastPowerOffIn,
IsSpcnRackPowerIncompleteOut */
struct {
u64 token;
u64 address_type;
u64 side;
u32 length;
u32 offset;
} kern; /* SetKernelImageIn, GetKernelImageIn,
SetKernelCmdLineIn, GetKernelCmdLineIn */
u32 length_out; /* GetKernelImageOut, GetKernelCmdLineOut */
u8 reserved[80];
} sub_data;
};
typedef void (*CeMsgCompleteHandler)( void *token, struct CeMsgData *vspCmdRsp );
struct VspRspData {
struct semaphore *sem;
struct VspCmdData *response;
};
struct CeMsgCompleteData
{
CeMsgCompleteHandler xHdlr;
void *xToken;
struct AllocData {
u16 size;
u16 type;
u32 count;
u16 reserved1;
u8 reserved2;
HvLpIndex target_lp;
};
struct VspRspData
{
struct semaphore *xSemaphore;
struct VspCmdData *xResponse;
struct CeMsgData;
typedef void (*CeMsgCompleteHandler)(void *token, struct CeMsgData *vspCmdRsp);
struct CeMsgCompleteData {
CeMsgCompleteHandler handler;
void *token;
};
struct IoMFLpEvent
{
struct HvLpEvent xHvLpEvent;
u16 xSubtypeRc;
u16 xRsvd1;
u32 xRsvd2;
union
{
struct AllocData
{
u16 xSize;
u16 xType;
u32 xCount;
u16 xRsvd3;
u8 xRsvd4;
HvLpIndex xTargetLp;
} xAllocData;
struct CeMsgData
{
u8 xCEMsg[12];
char xReserved[4];
struct CeMsgCompleteData *xToken;
} xCEMsgData;
struct VspCmdData
{
union SafeCast xTokenUnion;
u16 xCmd;
HvLpIndex xLpIndex;
u8 xRc;
u32 xReserved1;
union VspCmdSubData
{
struct
{
u64 xState;
} xGetStateOut;
struct
{
u64 xIplType;
} xGetIplTypeOut, xFunction02SelectIplTypeIn;
struct
{
u64 xIplMode;
} xGetIplModeOut, xFunction02SelectIplModeIn;
struct
{
u64 xPage[4];
} xGetSrcHistoryIn;
struct
{
u64 xFlag;
} xGetAutoIplWhenPrimaryIplsOut,
xSetAutoIplWhenPrimaryIplsIn,
xWhiteButtonPowerOffIn,
xFunction08FastPowerOffIn,
xIsSpcnRackPowerIncompleteOut;
struct
{
u64 xToken;
u64 xAddressType;
u64 xSide;
u32 xTransferLength;
u32 xOffset;
} xSetKernelImageIn,
xGetKernelImageIn,
xSetKernelCmdLineIn,
xGetKernelCmdLineIn;
struct
{
u32 xTransferLength;
} xGetKernelImageOut,xGetKernelCmdLineOut;
u8 xReserved2[80];
} xSubData;
} xVspCmd;
} xUnion;
struct CeMsgData {
u8 ce_msg[12];
char reserved[4];
struct CeMsgCompleteData *completion;
};
struct IoMFLpEvent {
struct HvLpEvent hp_lp_event;
u16 subtype_result_code;
u16 reserved1;
u32 reserved2;
union {
struct AllocData alloc;
struct CeMsgData ce_msg;
struct VspCmdData vsp_cmd;
} data;
};
#define subtype_data(a, b, c, d) \
(((a) << 24) + ((b) << 16) + ((c) << 8) + (d))
/*
* All outgoing event traffic is kept on a FIFO queue. The first
* pointer points to the one that is outstanding, and all new
* requests get stuck on the end. Also, we keep a certain number of
 * preallocated pending events so that we can operate very early in
* the boot up sequence (before kmalloc is ready).
*/
/*
 * One queued outbound event.  Events waiting to be signalled to the
 * hypervisor sit on a singly linked FIFO; each entry can carry up to
 * 72 bytes of DMA data that is transferred before the event is sent.
 * (De-interleaved: the old "struct StackElement" lines were mixed in.)
 */
struct pending_event {
	struct pending_event *next;
	struct IoMFLpEvent event;
	MFCompleteHandler hdlr;
	char dma_data[72];
	unsigned dma_data_length;
	unsigned remote_address;
};
/*
 * FIFO of outstanding events (head/tail), a free list (avail), and a
 * small preallocated pool for use before kmalloc is ready; all
 * guarded by pending_event_spinlock.  (The pre-patch head/tail/avail
 * globals were interleaved here and have been dropped.)
 */
static spinlock_t pending_event_spinlock;
static struct pending_event *pending_event_head;
static struct pending_event *pending_event_tail;
static struct pending_event *pending_event_avail;
static struct pending_event pending_event_prealloc[16];
/*
* Put a stack element onto the available queue, so it can get reused.
* Attention! You must have the spinlock before calling!
* Put a pending event onto the available queue, so it can get reused.
* Attention! You must have the pending_event_spinlock before calling!
*/
void free( struct StackElement * element )
static void free_pending_event(struct pending_event *ev)
{
if ( element != NULL )
{
element->next = avail;
avail = element;
if (ev != NULL) {
ev->next = pending_event_avail;
pending_event_avail = ev;
}
}
......@@ -203,68 +163,68 @@ void free( struct StackElement * element )
* Enqueue the outbound event onto the stack. If the queue was
* empty to begin with, we must also issue it via the Hypervisor
* interface. There is a section of code below that will touch
 * the first queued event without the protection of the
 * pending_event_spinlock.
* This is OK, because we know that nobody else will be modifying
* the first pointer when we do this.
*/
static int signalEvent( struct StackElement * newElement )
static int signal_event(struct pending_event *ev)
{
int rc = 0;
unsigned long flags;
int go = 1;
struct StackElement * element;
struct pending_event *ev1;
HvLpEvent_Rc hvRc;
/* enqueue the event */
if ( newElement != NULL )
{
spin_lock_irqsave( &spinlock, flags );
if ( head == NULL )
head = newElement;
if (ev != NULL) {
ev->next = NULL;
spin_lock_irqsave(&pending_event_spinlock, flags);
if (pending_event_head == NULL)
pending_event_head = ev;
else {
go = 0;
tail->next = newElement;
pending_event_tail->next = ev;
}
newElement->next = NULL;
tail = newElement;
spin_unlock_irqrestore( &spinlock, flags );
pending_event_tail = ev;
spin_unlock_irqrestore(&pending_event_spinlock, flags);
}
/* send the event */
while ( go )
{
while (go) {
go = 0;
/* any DMA data to send beforehand? */
if ( head->dmaDataLength > 0 )
HvCallEvent_dmaToSp( head->dmaData, head->remoteAddress, head->dmaDataLength, HvLpDma_Direction_LocalToRemote );
hvRc = HvCallEvent_signalLpEvent(&head->event.xHvLpEvent);
if ( hvRc != HvLpEvent_Rc_Good )
{
printk( KERN_ERR "mf.c: HvCallEvent_signalLpEvent() failed with %d\n", (int)hvRc );
spin_lock_irqsave( &spinlock, flags );
element = head;
head = head->next;
if ( head != NULL )
if (pending_event_head->dma_data_length > 0)
HvCallEvent_dmaToSp(pending_event_head->dma_data,
pending_event_head->remote_address,
pending_event_head->dma_data_length,
HvLpDma_Direction_LocalToRemote);
hvRc = HvCallEvent_signalLpEvent(
&pending_event_head->event.hp_lp_event);
if (hvRc != HvLpEvent_Rc_Good) {
printk(KERN_ERR "mf.c: HvCallEvent_signalLpEvent() failed with %d\n",
(int)hvRc);
spin_lock_irqsave(&pending_event_spinlock, flags);
ev1 = pending_event_head;
pending_event_head = pending_event_head->next;
if (pending_event_head != NULL)
go = 1;
spin_unlock_irqrestore( &spinlock, flags );
spin_unlock_irqrestore(&pending_event_spinlock, flags);
if ( element == newElement )
if (ev1 == ev)
rc = -EIO;
else {
if ( element->hdlr != NULL )
{
union SafeCast mySafeCast;
mySafeCast.ptrAsU64 = element->event.xHvLpEvent.xCorrelationToken;
(*element->hdlr)( mySafeCast.ptr, -EIO );
}
else if (ev1->hdlr != NULL) {
union safe_cast mySafeCast;
mySafeCast.ptr_as_u64 = ev1->event.hp_lp_event.xCorrelationToken;
(*ev1->hdlr)(mySafeCast.ptr, -EIO);
}
spin_lock_irqsave( &spinlock, flags );
free( element );
spin_unlock_irqrestore( &spinlock, flags );
spin_lock_irqsave(&pending_event_spinlock, flags);
free_pending_event(ev1);
spin_unlock_irqrestore(&pending_event_spinlock, flags);
}
}
......@@ -272,80 +232,74 @@ static int signalEvent( struct StackElement * newElement )
}
/*
* Allocate a new StackElement structure, and initialize it.
* Allocate a new pending_event structure, and initialize it.
*/
static struct StackElement * newStackElement( void )
static struct pending_event *new_pending_event(void)
{
struct StackElement * newElement = NULL;
struct pending_event *ev = NULL;
HvLpIndex primaryLp = HvLpConfig_getPrimaryLpIndex();
unsigned long flags;
struct HvLpEvent *hev;
if ( newElement == NULL )
{
spin_lock_irqsave( &spinlock, flags );
if ( avail != NULL )
{
newElement = avail;
avail = avail->next;
}
spin_unlock_irqrestore( &spinlock, flags );
spin_lock_irqsave(&pending_event_spinlock, flags);
if (pending_event_avail != NULL) {
ev = pending_event_avail;
pending_event_avail = pending_event_avail->next;
}
if ( newElement == NULL )
newElement = kmalloc(sizeof(struct StackElement),GFP_ATOMIC);
if ( newElement == NULL )
{
printk( KERN_ERR "mf.c: unable to kmalloc %ld bytes\n", sizeof(struct StackElement) );
spin_unlock_irqrestore(&pending_event_spinlock, flags);
if (ev == NULL)
ev = kmalloc(sizeof(struct pending_event),GFP_ATOMIC);
if (ev == NULL) {
printk(KERN_ERR "mf.c: unable to kmalloc %ld bytes\n",
sizeof(struct pending_event));
return NULL;
}
memset( newElement, 0, sizeof(struct StackElement) );
newElement->event.xHvLpEvent.xFlags.xValid = 1;
newElement->event.xHvLpEvent.xFlags.xAckType = HvLpEvent_AckType_ImmediateAck;
newElement->event.xHvLpEvent.xFlags.xAckInd = HvLpEvent_AckInd_DoAck;
newElement->event.xHvLpEvent.xFlags.xFunction = HvLpEvent_Function_Int;
newElement->event.xHvLpEvent.xType = HvLpEvent_Type_MachineFac;
newElement->event.xHvLpEvent.xSourceLp = HvLpConfig_getLpIndex();
newElement->event.xHvLpEvent.xTargetLp = primaryLp;
newElement->event.xHvLpEvent.xSizeMinus1 = sizeof(newElement->event)-1;
newElement->event.xHvLpEvent.xRc = HvLpEvent_Rc_Good;
newElement->event.xHvLpEvent.xSourceInstanceId = HvCallEvent_getSourceLpInstanceId(primaryLp,HvLpEvent_Type_MachineFac);
newElement->event.xHvLpEvent.xTargetInstanceId = HvCallEvent_getTargetLpInstanceId(primaryLp,HvLpEvent_Type_MachineFac);
return newElement;
memset(ev, 0, sizeof(struct pending_event));
hev = &ev->event.hp_lp_event;
hev->xFlags.xValid = 1;
hev->xFlags.xAckType = HvLpEvent_AckType_ImmediateAck;
hev->xFlags.xAckInd = HvLpEvent_AckInd_DoAck;
hev->xFlags.xFunction = HvLpEvent_Function_Int;
hev->xType = HvLpEvent_Type_MachineFac;
hev->xSourceLp = HvLpConfig_getLpIndex();
hev->xTargetLp = primaryLp;
hev->xSizeMinus1 = sizeof(ev->event)-1;
hev->xRc = HvLpEvent_Rc_Good;
hev->xSourceInstanceId = HvCallEvent_getSourceLpInstanceId(primaryLp,
HvLpEvent_Type_MachineFac);
hev->xTargetInstanceId = HvCallEvent_getTargetLpInstanceId(primaryLp,
HvLpEvent_Type_MachineFac);
return ev;
}
static int signalVspInstruction( struct VspCmdData *vspCmd )
static int signal_vsp_instruction(struct VspCmdData *vspCmd)
{
struct StackElement * newElement = newStackElement();
int rc = 0;
struct pending_event *ev = new_pending_event();
int rc;
struct VspRspData response;
DECLARE_MUTEX_LOCKED(Semaphore);
response.xSemaphore = &Semaphore;
response.xResponse = vspCmd;
if ( newElement == NULL )
rc = -ENOMEM;
else {
newElement->event.xHvLpEvent.xSubtype = 6;
newElement->event.xHvLpEvent.x.xSubtypeData = ('M'<<24)+('F'<<16)+('V'<<8)+('I'<<0);
newElement->event.xUnion.xVspCmd.xTokenUnion.ptr = &response;
newElement->event.xUnion.xVspCmd.xCmd = vspCmd->xCmd;
newElement->event.xUnion.xVspCmd.xLpIndex = HvLpConfig_getLpIndex();
newElement->event.xUnion.xVspCmd.xRc = 0xFF;
newElement->event.xUnion.xVspCmd.xReserved1 = 0;
memcpy(&(newElement->event.xUnion.xVspCmd.xSubData),&(vspCmd->xSubData), sizeof(vspCmd->xSubData));
mb();
rc = signalEvent(newElement);
}
if (ev == NULL)
return -ENOMEM;
response.sem = &Semaphore;
response.response = vspCmd;
ev->event.hp_lp_event.xSubtype = 6;
ev->event.hp_lp_event.x.xSubtypeData =
subtype_data('M', 'F', 'V', 'I');
ev->event.data.vsp_cmd.token.ptr = &response;
ev->event.data.vsp_cmd.cmd = vspCmd->cmd;
ev->event.data.vsp_cmd.lp_index = HvLpConfig_getLpIndex();
ev->event.data.vsp_cmd.result_code = 0xFF;
ev->event.data.vsp_cmd.reserved = 0;
memcpy(&(ev->event.data.vsp_cmd.sub_data),
&(vspCmd->sub_data), sizeof(vspCmd->sub_data));
mb();
rc = signal_event(ev);
if (rc == 0)
{
down(&Semaphore);
}
return rc;
}
......@@ -353,46 +307,42 @@ static int signalVspInstruction( struct VspCmdData *vspCmd )
/*
* Send a 12-byte CE message to the primary partition VSP object
*/
static int signalCEMsg( char * ceMsg, void * token )
static int signal_ce_msg(char *ce_msg, struct CeMsgCompleteData *completion)
{
struct StackElement * newElement = newStackElement();
int rc = 0;
struct pending_event *ev = new_pending_event();
if ( newElement == NULL )
rc = -ENOMEM;
else {
newElement->event.xHvLpEvent.xSubtype = 0;
newElement->event.xHvLpEvent.x.xSubtypeData = ('M'<<24)+('F'<<16)+('C'<<8)+('E'<<0);
memcpy( newElement->event.xUnion.xCEMsgData.xCEMsg, ceMsg, 12 );
newElement->event.xUnion.xCEMsgData.xToken = token;
rc = signalEvent(newElement);
}
if (ev == NULL)
return -ENOMEM;
return rc;
ev->event.hp_lp_event.xSubtype = 0;
ev->event.hp_lp_event.x.xSubtypeData =
subtype_data('M', 'F', 'C', 'E');
memcpy(ev->event.data.ce_msg.ce_msg, ce_msg, 12);
ev->event.data.ce_msg.completion = completion;
return signal_event(ev);
}
/*
* Send a 12-byte CE message and DMA data to the primary partition VSP object
*/
static int dmaAndSignalCEMsg( char * ceMsg, void * token, void * dmaData, unsigned dmaDataLength, unsigned remoteAddress )
static int dma_and_signal_ce_msg(char *ce_msg,
struct CeMsgCompleteData *completion, void *dma_data,
unsigned dma_data_length, unsigned remote_address)
{
struct StackElement * newElement = newStackElement();
int rc = 0;
struct pending_event *ev = new_pending_event();
if ( newElement == NULL )
rc = -ENOMEM;
else {
newElement->event.xHvLpEvent.xSubtype = 0;
newElement->event.xHvLpEvent.x.xSubtypeData = ('M'<<24)+('F'<<16)+('C'<<8)+('E'<<0);
memcpy( newElement->event.xUnion.xCEMsgData.xCEMsg, ceMsg, 12 );
newElement->event.xUnion.xCEMsgData.xToken = token;
memcpy( newElement->dmaData, dmaData, dmaDataLength );
newElement->dmaDataLength = dmaDataLength;
newElement->remoteAddress = remoteAddress;
rc = signalEvent(newElement);
}
if (ev == NULL)
return -ENOMEM;
return rc;
ev->event.hp_lp_event.xSubtype = 0;
ev->event.hp_lp_event.x.xSubtypeData =
subtype_data('M', 'F', 'C', 'E');
memcpy(ev->event.data.ce_msg.ce_msg, ce_msg, 12);
ev->event.data.ce_msg.completion = completion;
memcpy(ev->dma_data, dma_data, dma_data_length);
ev->dma_data_length = dma_data_length;
ev->remote_address = remote_address;
return signal_event(ev);
}
/*
......@@ -401,18 +351,17 @@ static int dmaAndSignalCEMsg( char * ceMsg, void * token, void * dmaData, unsign
* this fails (why?), we'll simply force it off in a not-so-nice
* manner.
*/
static int shutdown( void )
static int shutdown(void)
{
int rc = kill_proc(1,SIGINT,1);
int rc = kill_proc(1, SIGINT, 1);
if ( rc )
{
printk( KERN_ALERT "mf.c: SIGINT to init failed (%d), hard shutdown commencing\n", rc );
if (rc) {
printk(KERN_ALERT "mf.c: SIGINT to init failed (%d), "
"hard shutdown commencing\n", rc);
mf_powerOff();
}
else
printk( KERN_INFO "mf.c: init has been successfully notified to proceed with shutdown\n" );
} else
printk(KERN_INFO "mf.c: init has been successfully notified "
"to proceed with shutdown\n");
return rc;
}
......@@ -420,67 +369,64 @@ static int shutdown( void )
* The primary partition VSP object is sending us a new
* event flow. Handle it...
*/
static void intReceived( struct IoMFLpEvent * event )
static void intReceived(struct IoMFLpEvent *event)
{
int freeIt = 0;
struct StackElement * two = NULL;
struct pending_event *two = NULL;
/* ack the interrupt */
event->xHvLpEvent.xRc = HvLpEvent_Rc_Good;
HvCallEvent_ackLpEvent( &event->xHvLpEvent );
event->hp_lp_event.xRc = HvLpEvent_Rc_Good;
HvCallEvent_ackLpEvent(&event->hp_lp_event);
/* process interrupt */
switch( event->xHvLpEvent.xSubtype )
{
/* process interrupt */
switch (event->hp_lp_event.xSubtype) {
case 0: /* CE message */
switch( event->xUnion.xCEMsgData.xCEMsg[3] )
{
switch (event->data.ce_msg.ce_msg[3]) {
case 0x5B: /* power control notification */
if ( (event->xUnion.xCEMsgData.xCEMsg[5]&0x20) != 0 )
{
printk( KERN_INFO "mf.c: Commencing partition shutdown\n" );
if ( shutdown() == 0 )
signalCEMsg( "\x00\x00\x00\xDB\x00\x00\x00\x00\x00\x00\x00\x00", NULL );
if ((event->data.ce_msg.ce_msg[5] & 0x20) != 0) {
printk(KERN_INFO "mf.c: Commencing partition shutdown\n");
if (shutdown() == 0)
signal_ce_msg("\x00\x00\x00\xDB\x00\x00\x00\x00\x00\x00\x00\x00", NULL);
}
break;
case 0xC0: /* get time */
{
if ( (head != NULL) && ( head->event.xUnion.xCEMsgData.xCEMsg[3] == 0x40 ) )
{
freeIt = 1;
if ( head->event.xUnion.xCEMsgData.xToken != 0 )
{
CeMsgCompleteHandler xHdlr = head->event.xUnion.xCEMsgData.xToken->xHdlr;
void * token = head->event.xUnion.xCEMsgData.xToken->xToken;
if ((pending_event_head == NULL) ||
(pending_event_head->event.data.ce_msg.ce_msg[3]
!= 0x40))
break;
freeIt = 1;
if (pending_event_head->event.data.ce_msg.completion != 0) {
CeMsgCompleteHandler handler = pending_event_head->event.data.ce_msg.completion->handler;
void *token = pending_event_head->event.data.ce_msg.completion->token;
if (xHdlr != NULL)
(*xHdlr)( token, &(event->xUnion.xCEMsgData) );
}
}
if (handler != NULL)
(*handler)(token, &(event->data.ce_msg));
}
break;
}
/* remove from queue */
if ( freeIt == 1 )
{
if (freeIt == 1) {
unsigned long flags;
spin_lock_irqsave( &spinlock, flags );
if ( head != NULL )
{
struct StackElement *oldHead = head;
head = head->next;
two = head;
free( oldHead );
spin_lock_irqsave(&pending_event_spinlock, flags);
if (pending_event_head != NULL) {
struct pending_event *oldHead =
pending_event_head;
pending_event_head = pending_event_head->next;
two = pending_event_head;
free_pending_event(oldHead);
}
spin_unlock_irqrestore( &spinlock, flags );
spin_unlock_irqrestore(&pending_event_spinlock, flags);
}
/* send next waiting event */
if ( two != NULL )
signalEvent( NULL );
if (two != NULL)
signal_event(NULL);
break;
case 1: /* IT sys shutdown */
printk( KERN_INFO "mf.c: Commencing system shutdown\n" );
printk(KERN_INFO "mf.c: Commencing system shutdown\n");
shutdown();
break;
}
......@@ -491,81 +437,74 @@ static void intReceived( struct IoMFLpEvent * event )
* of a flow we sent to them. If there are other flows queued
* up, we must send another one now...
*/
static void ackReceived( struct IoMFLpEvent * event )
static void ackReceived(struct IoMFLpEvent *event)
{
unsigned long flags;
struct StackElement * two = NULL;
struct pending_event * two = NULL;
unsigned long freeIt = 0;
/* handle current event */
if ( head != NULL )
{
switch( event->xHvLpEvent.xSubtype )
{
/* handle current event */
if (pending_event_head != NULL) {
switch (event->hp_lp_event.xSubtype) {
case 0: /* CE msg */
if ( event->xUnion.xCEMsgData.xCEMsg[3] == 0x40 )
{
if ( event->xUnion.xCEMsgData.xCEMsg[2] != 0 )
{
if (event->data.ce_msg.ce_msg[3] == 0x40) {
if (event->data.ce_msg.ce_msg[2] != 0) {
freeIt = 1;
if ( head->event.xUnion.xCEMsgData.xToken != 0 )
{
CeMsgCompleteHandler xHdlr = head->event.xUnion.xCEMsgData.xToken->xHdlr;
void * token = head->event.xUnion.xCEMsgData.xToken->xToken;
if (pending_event_head->event.data.ce_msg.completion
!= 0) {
CeMsgCompleteHandler handler = pending_event_head->event.data.ce_msg.completion->handler;
void *token = pending_event_head->event.data.ce_msg.completion->token;
if (xHdlr != NULL)
(*xHdlr)( token, &(event->xUnion.xCEMsgData) );
if (handler != NULL)
(*handler)(token, &(event->data.ce_msg));
}
}
} else {
} else
freeIt = 1;
}
break;
case 4: /* allocate */
case 5: /* deallocate */
if ( head->hdlr != NULL )
{
union SafeCast mySafeCast;
mySafeCast.ptrAsU64 = event->xHvLpEvent.xCorrelationToken;
(*head->hdlr)( mySafeCast.ptr, event->xUnion.xAllocData.xCount );
if (pending_event_head->hdlr != NULL) {
union safe_cast mySafeCast;
mySafeCast.ptr_as_u64 = event->hp_lp_event.xCorrelationToken;
(*pending_event_head->hdlr)(mySafeCast.ptr, event->data.alloc.count);
}
freeIt = 1;
break;
case 6:
{
struct VspRspData *rsp = (struct VspRspData *)event->xUnion.xVspCmd.xTokenUnion.ptr;
if (rsp != NULL)
{
if (rsp->xResponse != NULL)
memcpy(rsp->xResponse, &(event->xUnion.xVspCmd), sizeof(event->xUnion.xVspCmd));
if (rsp->xSemaphore != NULL)
up(rsp->xSemaphore);
} else {
printk( KERN_ERR "mf.c: no rsp\n");
}
struct VspRspData *rsp = (struct VspRspData *)event->data.vsp_cmd.token.ptr;
if (rsp != NULL) {
if (rsp->response != NULL)
memcpy(rsp->response, &(event->data.vsp_cmd), sizeof(event->data.vsp_cmd));
if (rsp->sem != NULL)
up(rsp->sem);
} else
printk(KERN_ERR "mf.c: no rsp\n");
freeIt = 1;
}
break;
}
}
else
printk( KERN_ERR "mf.c: stack empty for receiving ack\n" );
/* remove from queue */
spin_lock_irqsave( &spinlock, flags );
if (( head != NULL ) && ( freeIt == 1 ))
{
struct StackElement *oldHead = head;
head = head->next;
two = head;
free( oldHead );
printk(KERN_ERR "mf.c: stack empty for receiving ack\n");
/* remove from queue */
spin_lock_irqsave(&pending_event_spinlock, flags);
if ((pending_event_head != NULL) && (freeIt == 1)) {
struct pending_event *oldHead = pending_event_head;
pending_event_head = pending_event_head->next;
two = pending_event_head;
free_pending_event(oldHead);
}
spin_unlock_irqrestore( &spinlock, flags );
spin_unlock_irqrestore(&pending_event_spinlock, flags);
/* send next waiting event */
if ( two != NULL )
signalEvent( NULL );
/* send next waiting event */
if (two != NULL)
signal_event(NULL);
}
/*
......@@ -574,101 +513,94 @@ static void ackReceived( struct IoMFLpEvent * event )
* parse it enough to know if it is an interrupt or an
* acknowledge.
*/
static void hvHandler( struct HvLpEvent * event, struct pt_regs * regs )
static void hvHandler(struct HvLpEvent *event, struct pt_regs *regs)
{
if ( (event != NULL) && (event->xType == HvLpEvent_Type_MachineFac) )
{
switch( event->xFlags.xFunction )
{
if ((event != NULL) && (event->xType == HvLpEvent_Type_MachineFac)) {
switch(event->xFlags.xFunction) {
case HvLpEvent_Function_Ack:
ackReceived( (struct IoMFLpEvent *)event );
ackReceived((struct IoMFLpEvent *)event);
break;
case HvLpEvent_Function_Int:
intReceived( (struct IoMFLpEvent *)event );
intReceived((struct IoMFLpEvent *)event);
break;
default:
printk( KERN_ERR "mf.c: non ack/int event received\n" );
printk(KERN_ERR "mf.c: non ack/int event received\n");
break;
}
}
else
printk( KERN_ERR "mf.c: alien event received\n" );
} else
printk(KERN_ERR "mf.c: alien event received\n");
}
/*
* Global kernel interface to allocate and seed events into the
* Hypervisor.
*/
void mf_allocateLpEvents( HvLpIndex targetLp,
HvLpEvent_Type type,
unsigned size,
unsigned count,
MFCompleteHandler hdlr,
void * userToken )
void mf_allocateLpEvents(HvLpIndex targetLp, HvLpEvent_Type type,
unsigned size, unsigned count, MFCompleteHandler hdlr,
void *userToken)
{
struct StackElement * newElement = newStackElement();
int rc = 0;
struct pending_event *ev = new_pending_event();
int rc;
if ( newElement == NULL )
if (ev == NULL) {
rc = -ENOMEM;
else {
union SafeCast mine;
} else {
union safe_cast mine;
mine.ptr = userToken;
newElement->event.xHvLpEvent.xSubtype = 4;
newElement->event.xHvLpEvent.xCorrelationToken = mine.ptrAsU64;
newElement->event.xHvLpEvent.x.xSubtypeData = ('M'<<24)+('F'<<16)+('M'<<8)+('A'<<0);
newElement->event.xUnion.xAllocData.xTargetLp = targetLp;
newElement->event.xUnion.xAllocData.xType = type;
newElement->event.xUnion.xAllocData.xSize = size;
newElement->event.xUnion.xAllocData.xCount = count;
newElement->hdlr = hdlr;
rc = signalEvent(newElement);
ev->event.hp_lp_event.xSubtype = 4;
ev->event.hp_lp_event.xCorrelationToken = mine.ptr_as_u64;
ev->event.hp_lp_event.x.xSubtypeData =
subtype_data('M', 'F', 'M', 'A');
ev->event.data.alloc.target_lp = targetLp;
ev->event.data.alloc.type = type;
ev->event.data.alloc.size = size;
ev->event.data.alloc.count = count;
ev->hdlr = hdlr;
rc = signal_event(ev);
}
if ( (rc != 0) && (hdlr != NULL) )
(*hdlr)( userToken, rc );
if ((rc != 0) && (hdlr != NULL))
(*hdlr)(userToken, rc);
}
/*
* Global kernel interface to unseed and deallocate events already in
* Hypervisor.
*/
void mf_deallocateLpEvents( HvLpIndex targetLp,
HvLpEvent_Type type,
unsigned count,
MFCompleteHandler hdlr,
void * userToken )
void mf_deallocateLpEvents(HvLpIndex targetLp, HvLpEvent_Type type,
unsigned count, MFCompleteHandler hdlr, void *userToken)
{
struct StackElement * newElement = newStackElement();
int rc = 0;
struct pending_event *ev = new_pending_event();
int rc;
if ( newElement == NULL )
if (ev == NULL)
rc = -ENOMEM;
else {
union SafeCast mine;
union safe_cast mine;
mine.ptr = userToken;
newElement->event.xHvLpEvent.xSubtype = 5;
newElement->event.xHvLpEvent.xCorrelationToken = mine.ptrAsU64;
newElement->event.xHvLpEvent.x.xSubtypeData = ('M'<<24)+('F'<<16)+('M'<<8)+('D'<<0);
newElement->event.xUnion.xAllocData.xTargetLp = targetLp;
newElement->event.xUnion.xAllocData.xType = type;
newElement->event.xUnion.xAllocData.xCount = count;
newElement->hdlr = hdlr;
rc = signalEvent(newElement);
ev->event.hp_lp_event.xSubtype = 5;
ev->event.hp_lp_event.xCorrelationToken = mine.ptr_as_u64;
ev->event.hp_lp_event.x.xSubtypeData =
subtype_data('M', 'F', 'M', 'D');
ev->event.data.alloc.target_lp = targetLp;
ev->event.data.alloc.type = type;
ev->event.data.alloc.count = count;
ev->hdlr = hdlr;
rc = signal_event(ev);
}
if ( (rc != 0) && (hdlr != NULL) )
(*hdlr)( userToken, rc );
if ((rc != 0) && (hdlr != NULL))
(*hdlr)(userToken, rc);
}
/*
* Global kernel interface to tell the VSP object in the primary
* partition to power this partition off.
*/
void mf_powerOff( void )
void mf_powerOff(void)
{
printk( KERN_INFO "mf.c: Down it goes...\n" );
signalCEMsg( "\x00\x00\x00\x4D\x00\x00\x00\x00\x00\x00\x00\x00", NULL );
printk(KERN_INFO "mf.c: Down it goes...\n");
signal_ce_msg("\x00\x00\x00\x4D\x00\x00\x00\x00\x00\x00\x00\x00", NULL);
for (;;);
}
......@@ -676,111 +608,104 @@ void mf_powerOff( void )
* Global kernel interface to tell the VSP object in the primary
* partition to reboot this partition.
*/
void mf_reboot( void )
void mf_reboot(void)
{
printk( KERN_INFO "mf.c: Preparing to bounce...\n" );
signalCEMsg( "\x00\x00\x00\x4E\x00\x00\x00\x00\x00\x00\x00\x00", NULL );
printk(KERN_INFO "mf.c: Preparing to bounce...\n");
signal_ce_msg("\x00\x00\x00\x4E\x00\x00\x00\x00\x00\x00\x00\x00", NULL);
for (;;);
}
/*
* Display a single word SRC onto the VSP control panel.
*/
void mf_displaySrc( u32 word )
void mf_displaySrc(u32 word)
{
u8 ce[12];
memcpy( ce, "\x00\x00\x00\x4A\x00\x00\x00\x01\x00\x00\x00\x00", 12 );
ce[8] = word>>24;
ce[9] = word>>16;
ce[10] = word>>8;
memcpy(ce, "\x00\x00\x00\x4A\x00\x00\x00\x01\x00\x00\x00\x00", 12);
ce[8] = word >> 24;
ce[9] = word >> 16;
ce[10] = word >> 8;
ce[11] = word;
signalCEMsg( ce, NULL );
signal_ce_msg(ce, NULL);
}
/*
* Display a single word SRC of the form "PROGXXXX" on the VSP control panel.
*/
void mf_displayProgress( u16 value )
void mf_displayProgress(u16 value)
{
u8 ce[12];
u8 src[72];
memcpy( ce, "\x00\x00\x04\x4A\x00\x00\x00\x48\x00\x00\x00\x00", 12 );
memcpy( src,
"\x01\x00\x00\x01"
"\x00\x00\x00\x00"
"\x00\x00\x00\x00"
"\x00\x00\x00\x00"
"\x00\x00\x00\x00"
"\x00\x00\x00\x00"
"\x00\x00\x00\x00"
"\x00\x00\x00\x00"
"\x00\x00\x00\x00"
"\x00\x00\x00\x00"
"PROGxxxx"
" ",
72 );
src[6] = value>>8;
src[7] = value&255;
src[44] = "0123456789ABCDEF"[(value>>12)&15];
src[45] = "0123456789ABCDEF"[(value>>8)&15];
src[46] = "0123456789ABCDEF"[(value>>4)&15];
src[47] = "0123456789ABCDEF"[value&15];
dmaAndSignalCEMsg( ce, NULL, src, sizeof(src), 9*64*1024 );
memcpy(ce, "\x00\x00\x04\x4A\x00\x00\x00\x48\x00\x00\x00\x00", 12);
memcpy(src, "\x01\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00"
"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
"\x00\x00\x00\x00PROGxxxx ",
72);
src[6] = value >> 8;
src[7] = value & 255;
src[44] = "0123456789ABCDEF"[(value >> 12) & 15];
src[45] = "0123456789ABCDEF"[(value >> 8) & 15];
src[46] = "0123456789ABCDEF"[(value >> 4) & 15];
src[47] = "0123456789ABCDEF"[value & 15];
dma_and_signal_ce_msg(ce, NULL, src, sizeof(src), 9 * 64 * 1024);
}
/*
* Clear the VSP control panel. Used to "erase" an SRC that was
* previously displayed.
*/
void mf_clearSrc( void )
void mf_clearSrc(void)
{
signalCEMsg( "\x00\x00\x00\x4B\x00\x00\x00\x00\x00\x00\x00\x00", NULL );
signal_ce_msg("\x00\x00\x00\x4B\x00\x00\x00\x00\x00\x00\x00\x00", NULL);
}
/*
* Initialization code here.
*/
void mf_init( void )
void mf_init(void)
{
int i;
/* initialize */
spin_lock_init( &spinlock );
for ( i = 0; i < sizeof(prealloc)/sizeof(*prealloc); ++i )
free( &prealloc[i] );
HvLpEvent_registerHandler( HvLpEvent_Type_MachineFac, &hvHandler );
/* initialize */
spin_lock_init(&pending_event_spinlock);
for (i = 0;
i < sizeof(pending_event_prealloc) / sizeof(*pending_event_prealloc);
++i)
free_pending_event(&pending_event_prealloc[i]);
HvLpEvent_registerHandler(HvLpEvent_Type_MachineFac, &hvHandler);
/* virtual continue ack */
signalCEMsg( "\x00\x00\x00\x57\x00\x00\x00\x00\x00\x00\x00\x00", NULL );
signal_ce_msg("\x00\x00\x00\x57\x00\x00\x00\x00\x00\x00\x00\x00", NULL);
/* initialization complete */
printk( KERN_NOTICE "mf.c: iSeries Linux LPAR Machine Facilities initialized\n" );
printk(KERN_NOTICE "mf.c: iSeries Linux LPAR Machine Facilities initialized\n");
iSeries_proc_callback(&mf_proc_init);
}
void mf_setSide(char side)
{
int rc = 0;
u64 newSide = 0;
u64 newSide;
struct VspCmdData myVspCmd;
memset(&myVspCmd, 0, sizeof(myVspCmd));
if (side == 'A')
newSide = 0;
else if (side == 'B')
newSide = 1;
else if (side == 'C')
newSide = 2;
else
newSide = 3;
myVspCmd.xSubData.xFunction02SelectIplTypeIn.xIplType = newSide;
myVspCmd.xCmd = 10;
switch (side) {
case 'A': newSide = 0;
break;
case 'B': newSide = 1;
break;
case 'C': newSide = 2;
break;
default: newSide = 3;
break;
}
myVspCmd.sub_data.ipl_type = newSide;
myVspCmd.cmd = 10;
rc = signalVspInstruction(&myVspCmd);
(void)signal_vsp_instruction(&myVspCmd);
}
char mf_getSide(void)
......@@ -790,91 +715,82 @@ char mf_getSide(void)
struct VspCmdData myVspCmd;
memset(&myVspCmd, 0, sizeof(myVspCmd));
myVspCmd.xCmd = 2;
myVspCmd.xSubData.xFunction02SelectIplTypeIn.xIplType = 0;
myVspCmd.cmd = 2;
myVspCmd.sub_data.ipl_type = 0;
mb();
rc = signalVspInstruction(&myVspCmd);
rc = signal_vsp_instruction(&myVspCmd);
if (rc != 0)
{
return returnValue;
} else {
if (myVspCmd.xRc == 0)
{
if (myVspCmd.xSubData.xGetIplTypeOut.xIplType == 0)
returnValue = 'A';
else if (myVspCmd.xSubData.xGetIplTypeOut.xIplType == 1)
returnValue = 'B';
else if (myVspCmd.xSubData.xGetIplTypeOut.xIplType == 2)
returnValue = 'C';
else
returnValue = 'D';
if (myVspCmd.result_code == 0) {
switch (myVspCmd.sub_data.ipl_type) {
case 0: returnValue = 'A';
break;
case 1: returnValue = 'B';
break;
case 2: returnValue = 'C';
break;
default: returnValue = 'D';
break;
}
}
return returnValue;
}
void mf_getSrcHistory(char *buffer, int size)
{
/* struct IplTypeReturnStuff returnStuff;
struct StackElement * newElement = newStackElement();
int rc = 0;
char *pages[4];
pages[0] = kmalloc(4096, GFP_ATOMIC);
pages[1] = kmalloc(4096, GFP_ATOMIC);
pages[2] = kmalloc(4096, GFP_ATOMIC);
pages[3] = kmalloc(4096, GFP_ATOMIC);
if (( newElement == NULL ) || (pages[0] == NULL) || (pages[1] == NULL) || (pages[2] == NULL) || (pages[3] == NULL))
rc = -ENOMEM;
else
{
returnStuff.xType = 0;
returnStuff.xRc = 0;
returnStuff.xDone = 0;
newElement->event.xHvLpEvent.xSubtype = 6;
newElement->event.xHvLpEvent.x.xSubtypeData = ('M'<<24)+('F'<<16)+('V'<<8)+('I'<<0);
newElement->event.xUnion.xVspCmd.xEvent = &returnStuff;
newElement->event.xUnion.xVspCmd.xCmd = 4;
newElement->event.xUnion.xVspCmd.xLpIndex = HvLpConfig_getLpIndex();
newElement->event.xUnion.xVspCmd.xRc = 0xFF;
newElement->event.xUnion.xVspCmd.xReserved1 = 0;
newElement->event.xUnion.xVspCmd.xSubData.xGetSrcHistoryIn.xPage[0] = (0x8000000000000000ULL | virt_to_absolute((unsigned long)pages[0]));
newElement->event.xUnion.xVspCmd.xSubData.xGetSrcHistoryIn.xPage[1] = (0x8000000000000000ULL | virt_to_absolute((unsigned long)pages[1]));
newElement->event.xUnion.xVspCmd.xSubData.xGetSrcHistoryIn.xPage[2] = (0x8000000000000000ULL | virt_to_absolute((unsigned long)pages[2]));
newElement->event.xUnion.xVspCmd.xSubData.xGetSrcHistoryIn.xPage[3] = (0x8000000000000000ULL | virt_to_absolute((unsigned long)pages[3]));
mb();
rc = signalEvent(newElement);
}
if (rc != 0)
{
return;
}
else
{
while (returnStuff.xDone != 1)
{
udelay(10);
}
if (returnStuff.xRc == 0)
{
memcpy(buffer, pages[0], size);
}
}
kfree(pages[0]);
kfree(pages[1]);
kfree(pages[2]);
kfree(pages[3]);*/
#if 0
struct IplTypeReturnStuff returnStuff;
struct pending_event *ev = new_pending_event();
int rc = 0;
char *pages[4];
pages[0] = kmalloc(4096, GFP_ATOMIC);
pages[1] = kmalloc(4096, GFP_ATOMIC);
pages[2] = kmalloc(4096, GFP_ATOMIC);
pages[3] = kmalloc(4096, GFP_ATOMIC);
if ((ev == NULL) || (pages[0] == NULL) || (pages[1] == NULL)
|| (pages[2] == NULL) || (pages[3] == NULL))
return -ENOMEM;
returnStuff.xType = 0;
returnStuff.xRc = 0;
returnStuff.xDone = 0;
ev->event.hp_lp_event.xSubtype = 6;
ev->event.hp_lp_event.x.xSubtypeData =
subtype_data('M', 'F', 'V', 'I');
ev->event.data.vsp_cmd.xEvent = &returnStuff;
ev->event.data.vsp_cmd.cmd = 4;
ev->event.data.vsp_cmd.lp_index = HvLpConfig_getLpIndex();
ev->event.data.vsp_cmd.result_code = 0xFF;
ev->event.data.vsp_cmd.reserved = 0;
ev->event.data.vsp_cmd.sub_data.page[0] =
(0x8000000000000000ULL | virt_to_absolute((unsigned long)pages[0]));
ev->event.data.vsp_cmd.sub_data.page[1] =
(0x8000000000000000ULL | virt_to_absolute((unsigned long)pages[1]));
ev->event.data.vsp_cmd.sub_data.page[2] =
(0x8000000000000000ULL | virt_to_absolute((unsigned long)pages[2]));
ev->event.data.vsp_cmd.sub_data.page[3] =
(0x8000000000000000ULL | virt_to_absolute((unsigned long)pages[3]));
mb();
if (signal_event(ev) != 0)
return;
while (returnStuff.xDone != 1)
udelay(10);
if (returnStuff.xRc == 0)
memcpy(buffer, pages[0], size);
kfree(pages[0]);
kfree(pages[1]);
kfree(pages[2]);
kfree(pages[3]);
#endif
}
void mf_setCmdLine(const char *cmdline, int size, u64 side)
{
struct VspCmdData myVspCmd;
int rc = 0;
dma_addr_t dma_addr = 0;
char *page = pci_alloc_consistent(iSeries_vio_dev, size, &dma_addr);
......@@ -886,13 +802,13 @@ void mf_setCmdLine(const char *cmdline, int size, u64 side)
copy_from_user(page, cmdline, size);
memset(&myVspCmd, 0, sizeof(myVspCmd));
myVspCmd.xCmd = 31;
myVspCmd.xSubData.xSetKernelCmdLineIn.xToken = dma_addr;
myVspCmd.xSubData.xSetKernelCmdLineIn.xAddressType = HvLpDma_AddressType_TceIndex;
myVspCmd.xSubData.xSetKernelCmdLineIn.xSide = side;
myVspCmd.xSubData.xSetKernelCmdLineIn.xTransferLength = size;
myVspCmd.cmd = 31;
myVspCmd.sub_data.kern.token = dma_addr;
myVspCmd.sub_data.kern.address_type = HvLpDma_AddressType_TceIndex;
myVspCmd.sub_data.kern.side = side;
myVspCmd.sub_data.kern.length = size;
mb();
rc = signalVspInstruction(&myVspCmd);
(void)signal_vsp_instruction(&myVspCmd);
pci_free_consistent(iSeries_vio_dev, size, page, dma_addr);
}
......@@ -900,31 +816,29 @@ void mf_setCmdLine(const char *cmdline, int size, u64 side)
int mf_getCmdLine(char *cmdline, int *size, u64 side)
{
struct VspCmdData myVspCmd;
int rc = 0;
int rc;
int len = *size;
dma_addr_t dma_addr = pci_map_single(iSeries_vio_dev, cmdline, *size, PCI_DMA_FROMDEVICE);
dma_addr_t dma_addr;
memset(cmdline, 0, *size);
dma_addr = pci_map_single(iSeries_vio_dev, cmdline, len,
PCI_DMA_FROMDEVICE);
memset(cmdline, 0, len);
memset(&myVspCmd, 0, sizeof(myVspCmd));
myVspCmd.xCmd = 33;
myVspCmd.xSubData.xGetKernelCmdLineIn.xToken = dma_addr;
myVspCmd.xSubData.xGetKernelCmdLineIn.xAddressType = HvLpDma_AddressType_TceIndex;
myVspCmd.xSubData.xGetKernelCmdLineIn.xSide = side;
myVspCmd.xSubData.xGetKernelCmdLineIn.xTransferLength = *size;
myVspCmd.cmd = 33;
myVspCmd.sub_data.kern.token = dma_addr;
myVspCmd.sub_data.kern.address_type = HvLpDma_AddressType_TceIndex;
myVspCmd.sub_data.kern.side = side;
myVspCmd.sub_data.kern.length = len;
mb();
rc = signalVspInstruction(&myVspCmd);
if ( ! rc ) {
rc = signal_vsp_instruction(&myVspCmd);
if (myVspCmd.xRc == 0)
{
len = myVspCmd.xSubData.xGetKernelCmdLineOut.xTransferLength;
}
/* else
{
if (rc == 0) {
if (myVspCmd.result_code == 0)
len = myVspCmd.sub_data.length_out;
#if 0
else
memcpy(cmdline, "Bad cmdline", 11);
}
*/
#endif
}
pci_unmap_single(iSeries_vio_dev, dma_addr, *size, PCI_DMA_FROMDEVICE);
......@@ -936,10 +850,8 @@ int mf_getCmdLine(char *cmdline, int *size, u64 side)
int mf_setVmlinuxChunk(const char *buffer, int size, int offset, u64 side)
{
struct VspCmdData myVspCmd;
int rc = 0;
int rc;
dma_addr_t dma_addr = 0;
char *page = pci_alloc_consistent(iSeries_vio_dev, size, &dma_addr);
if (page == NULL) {
......@@ -950,23 +862,19 @@ int mf_setVmlinuxChunk(const char *buffer, int size, int offset, u64 side)
copy_from_user(page, buffer, size);
memset(&myVspCmd, 0, sizeof(myVspCmd));
myVspCmd.xCmd = 30;
myVspCmd.xSubData.xGetKernelImageIn.xToken = dma_addr;
myVspCmd.xSubData.xGetKernelImageIn.xAddressType = HvLpDma_AddressType_TceIndex;
myVspCmd.xSubData.xGetKernelImageIn.xSide = side;
myVspCmd.xSubData.xGetKernelImageIn.xOffset = offset;
myVspCmd.xSubData.xGetKernelImageIn.xTransferLength = size;
myVspCmd.cmd = 30;
myVspCmd.sub_data.kern.token = dma_addr;
myVspCmd.sub_data.kern.address_type = HvLpDma_AddressType_TceIndex;
myVspCmd.sub_data.kern.side = side;
myVspCmd.sub_data.kern.offset = offset;
myVspCmd.sub_data.kern.length = size;
mb();
rc = signalVspInstruction(&myVspCmd);
if (rc == 0)
{
if (myVspCmd.xRc == 0)
{
rc = signal_vsp_instruction(&myVspCmd);
if (rc == 0) {
if (myVspCmd.result_code == 0)
rc = 0;
} else {
else
rc = -ENOMEM;
}
}
pci_free_consistent(iSeries_vio_dev, size, page, dma_addr);
......@@ -977,31 +885,27 @@ int mf_setVmlinuxChunk(const char *buffer, int size, int offset, u64 side)
int mf_getVmlinuxChunk(char *buffer, int *size, int offset, u64 side)
{
struct VspCmdData myVspCmd;
int rc = 0;
int rc;
int len = *size;
dma_addr_t dma_addr;
dma_addr_t dma_addr = pci_map_single(iSeries_vio_dev, buffer, *size, PCI_DMA_FROMDEVICE);
dma_addr = pci_map_single(iSeries_vio_dev, buffer, len,
PCI_DMA_FROMDEVICE);
memset(buffer, 0, len);
memset(&myVspCmd, 0, sizeof(myVspCmd));
myVspCmd.xCmd = 32;
myVspCmd.xSubData.xGetKernelImageIn.xToken = dma_addr;
myVspCmd.xSubData.xGetKernelImageIn.xAddressType = HvLpDma_AddressType_TceIndex;
myVspCmd.xSubData.xGetKernelImageIn.xSide = side;
myVspCmd.xSubData.xGetKernelImageIn.xOffset = offset;
myVspCmd.xSubData.xGetKernelImageIn.xTransferLength = len;
myVspCmd.cmd = 32;
myVspCmd.sub_data.kern.token = dma_addr;
myVspCmd.sub_data.kern.address_type = HvLpDma_AddressType_TceIndex;
myVspCmd.sub_data.kern.side = side;
myVspCmd.sub_data.kern.offset = offset;
myVspCmd.sub_data.kern.length = len;
mb();
rc = signalVspInstruction(&myVspCmd);
if (rc == 0)
{
if (myVspCmd.xRc == 0)
{
*size = myVspCmd.xSubData.xGetKernelImageOut.xTransferLength;
} else {
rc = signal_vsp_instruction(&myVspCmd);
if (rc == 0) {
if (myVspCmd.result_code == 0)
*size = myVspCmd.sub_data.length_out;
else
rc = -ENOMEM;
}
}
pci_unmap_single(iSeries_vio_dev, dma_addr, len, PCI_DMA_FROMDEVICE);
......@@ -1015,12 +919,11 @@ int mf_setRtcTime(unsigned long time)
to_tm(time, &tm);
return mf_setRtc( &tm );
return mf_setRtc(&tm);
}
struct RtcTimeData
{
struct semaphore *xSemaphore;
struct RtcTimeData {
struct semaphore *sem;
struct CeMsgData xCeMsg;
int xRc;
};
......@@ -1030,26 +933,23 @@ void getRtcTimeComplete(void * token, struct CeMsgData *ceMsg)
struct RtcTimeData *rtc = (struct RtcTimeData *)token;
memcpy(&(rtc->xCeMsg), ceMsg, sizeof(rtc->xCeMsg));
rtc->xRc = 0;
up(rtc->xSemaphore);
up(rtc->sem);
}
static unsigned long lastsec = 1;
int mf_getRtcTime(unsigned long *time)
{
/* unsigned long usec, tsec; */
u32 dataWord1 = *((u32 *)(&xSpCommArea.xBcdTimeAtIplStart));
u32 dataWord2 = *(((u32 *)&(xSpCommArea.xBcdTimeAtIplStart)) + 1);
int year = 1970;
int year1 = ( dataWord1 >> 24 ) & 0x000000FF;
int year2 = ( dataWord1 >> 16 ) & 0x000000FF;
int sec = ( dataWord1 >> 8 ) & 0x000000FF;
int year1 = (dataWord1 >> 24) & 0x000000FF;
int year2 = (dataWord1 >> 16) & 0x000000FF;
int sec = (dataWord1 >> 8) & 0x000000FF;
int min = dataWord1 & 0x000000FF;
int hour = ( dataWord2 >> 24 ) & 0x000000FF;
int day = ( dataWord2 >> 8 ) & 0x000000FF;
int hour = (dataWord2 >> 24) & 0x000000FF;
int day = (dataWord2 >> 8) & 0x000000FF;
int mon = dataWord2 & 0x000000FF;
BCD_TO_BIN(sec);
......@@ -1062,49 +962,41 @@ int mf_getRtcTime(unsigned long *time)
year = year1 * 100 + year2;
*time = mktime(year, mon, day, hour, min, sec);
*time += ( jiffies / HZ );
*time += (jiffies / HZ);
/* Now THIS is a nasty hack!
/*
* Now THIS is a nasty hack!
* It ensures that the first two calls to mf_getRtcTime get different
* answers. That way the loop in init_time (time.c) will not think
* the clock is stuck.
*/
if ( lastsec ) {
if (lastsec) {
*time -= lastsec;
--lastsec;
}
return 0;
}
int mf_getRtc( struct rtc_time * tm )
int mf_getRtc(struct rtc_time *tm)
{
struct CeMsgCompleteData ceComplete;
struct RtcTimeData rtcData;
int rc = 0;
int rc;
DECLARE_MUTEX_LOCKED(Semaphore);
memset(&ceComplete, 0, sizeof(ceComplete));
memset(&rtcData, 0, sizeof(rtcData));
rtcData.xSemaphore = &Semaphore;
ceComplete.xHdlr = &getRtcTimeComplete;
ceComplete.xToken = (void *)&rtcData;
rc = signalCEMsg( "\x00\x00\x00\x40\x00\x00\x00\x00\x00\x00\x00\x00", &ceComplete );
if ( rc == 0 )
{
rtcData.sem = &Semaphore;
ceComplete.handler = &getRtcTimeComplete;
ceComplete.token = (void *)&rtcData;
rc = signal_ce_msg("\x00\x00\x00\x40\x00\x00\x00\x00\x00\x00\x00\x00",
&ceComplete);
if (rc == 0) {
down(&Semaphore);
if ( rtcData.xRc == 0)
{
if ( ( rtcData.xCeMsg.xCEMsg[2] == 0xa9 ) ||
( rtcData.xCeMsg.xCEMsg[2] == 0xaf ) ) {
if (rtcData.xRc == 0) {
if ((rtcData.xCeMsg.ce_msg[2] == 0xa9) ||
(rtcData.xCeMsg.ce_msg[2] == 0xaf)) {
/* TOD clock is not set */
tm->tm_sec = 1;
tm->tm_min = 1;
......@@ -1112,16 +1004,16 @@ int mf_getRtc( struct rtc_time * tm )
tm->tm_mday = 10;
tm->tm_mon = 8;
tm->tm_year = 71;
mf_setRtc( tm );
mf_setRtc(tm);
}
{
u32 dataWord1 = *((u32 *)(rtcData.xCeMsg.xCEMsg+4));
u32 dataWord2 = *((u32 *)(rtcData.xCeMsg.xCEMsg+8));
u8 year = (dataWord1 >> 16 ) & 0x000000FF;
u8 sec = ( dataWord1 >> 8 ) & 0x000000FF;
u32 dataWord1 = *((u32 *)(rtcData.xCeMsg.ce_msg+4));
u32 dataWord2 = *((u32 *)(rtcData.xCeMsg.ce_msg+8));
u8 year = (dataWord1 >> 16) & 0x000000FF;
u8 sec = (dataWord1 >> 8) & 0x000000FF;
u8 min = dataWord1 & 0x000000FF;
u8 hour = ( dataWord2 >> 24 ) & 0x000000FF;
u8 day = ( dataWord2 >> 8 ) & 0x000000FF;
u8 hour = (dataWord2 >> 24) & 0x000000FF;
u8 day = (dataWord2 >> 8) & 0x000000FF;
u8 mon = dataWord2 & 0x000000FF;
BCD_TO_BIN(sec);
......@@ -1131,7 +1023,7 @@ int mf_getRtc( struct rtc_time * tm )
BCD_TO_BIN(mon);
BCD_TO_BIN(year);
if ( year <= 69 )
if (year <= 69)
year += 100;
tm->tm_sec = sec;
......@@ -1154,17 +1046,14 @@ int mf_getRtc( struct rtc_time * tm )
tm->tm_wday = 0;
tm->tm_yday = 0;
tm->tm_isdst = 0;
}
return rc;
}
int mf_setRtc(struct rtc_time * tm)
{
char ceTime[12] = "\x00\x00\x00\x41\x00\x00\x00\x00\x00\x00\x00\x00";
int rc = 0;
u8 day, mon, hour, min, sec, y1, y2;
unsigned year;
......@@ -1194,10 +1083,5 @@ int mf_setRtc(struct rtc_time * tm)
ceTime[10] = day;
ceTime[11] = mon;
rc = signalCEMsg( ceTime, NULL );
return rc;
return signal_ce_msg(ceTime, NULL);
}
......@@ -66,32 +66,31 @@ _GLOBAL(get_sp)
blr
#ifdef CONFIG_PPC_ISERIES
/* unsigned long __no_use_save_flags(void) */
_GLOBAL(__no_use_save_flags)
#warning FIX ISERIES
mfspr r4,SPRG3
lbz r3,PACAPROCENABLED(r4)
/* unsigned long local_save_flags(void) */
_GLOBAL(local_get_flags)
lbz r3,PACAPROCENABLED(r13)
blr
/* void __no_use_restore_flags(unsigned long flags) */
_GLOBAL(__no_use_restore_flags)
/*
* Just set/clear the MSR_EE bit through restore/flags but do not
* change anything else. This is needed by the RT system and makes
* sense anyway.
* -- Cort
*/
#warning FIX ISERIES
mfspr r6,SPRG3
lbz r5,PACAPROCENABLED(r6)
/* unsigned long local_irq_disable(void) */
_GLOBAL(local_irq_disable)
lbz r3,PACAPROCENABLED(r13)
li r4,0
stb r4,PACAPROCENABLED(r13)
blr /* Done */
/* void local_irq_restore(unsigned long flags) */
_GLOBAL(local_irq_restore)
lbz r5,PACAPROCENABLED(r13)
/* Check if things are setup the way we want _already_. */
cmpw 0,r3,r5
beqlr
/* are we enabling interrupts? */
cmpi 0,r3,0
stb r3,PACAPROCENABLED(r6)
stb r3,PACAPROCENABLED(r13)
beqlr
/* Check pending interrupts */
/* A decrementer, IPI or PMC interrupt may have occurred
* while we were in the hypervisor (which enables) */
CHECKANYINT(r4,r5)
beqlr
......@@ -101,35 +100,8 @@ _GLOBAL(__no_use_restore_flags)
li r0,0x5555
sc
blr
#endif /* CONFIG_PPC_ISERIES */
_GLOBAL(__no_use_cli)
#warning FIX ISERIES
mfspr r5,SPRG3
lbz r3,PACAPROCENABLED(r5)
li r4,0
stb r4,PACAPROCENABLED(r5)
blr /* Done */
_GLOBAL(__no_use_sti)
#warning FIX ISERIES
mfspr r6,SPRG3
li r3,1
stb r3,PACAPROCENABLED(r6)
/* Check for pending interrupts
* A decrementer, IPI or PMC interrupt may have occurred
* while we were in the hypervisor (which enables)
*/
CHECKANYINT(r4,r5)
beqlr
/*
* Handle pending interrupts in interrupt context
*/
li r0,0x5555
sc
blr
#endif
/*
* Flush instruction cache.
*/
......@@ -595,6 +567,10 @@ SYSCALL(dup)
SYSCALL(execve)
SYSCALL(waitpid)
#ifdef CONFIG_PPC_ISERIES /* hack hack hack */
#define ppc_rtas sys_ni_syscall
#endif
/* Why isn't this a) automatic, b) written in 'C'? */
.balign 8
_GLOBAL(sys_call_table32)
......
......@@ -48,11 +48,13 @@
/* #define MONITOR_TCE 1 */ /* Turn on to sanity check TCE generation. */
#ifdef CONFIG_PPC_PSERIES
/* Initialize so this guy does not end up in the BSS section.
* Only used to pass OF initialization data set in prom.c into the main
* kernel code -- data ultimately copied into tceTables[].
*/
extern struct _of_tce_table of_tce_table[];
#endif
extern struct pci_controller* hose_head;
extern struct pci_controller** hose_tail;
......@@ -98,7 +100,7 @@ void free_tce_range_nolock(struct TceTable *,
unsigned order );
/* allocates a range of tces and sets them to the pages */
inline dma_addr_t get_tces( struct TceTable *,
static inline dma_addr_t get_tces( struct TceTable *,
unsigned order,
void *page,
unsigned numPages,
......@@ -210,7 +212,7 @@ static void tce_build_pSeries(struct TceTable *tbl, long tcenum,
* Build a TceTable structure. This contains a multi-level bit map which
* is used to manage allocation of the tce space.
*/
struct TceTable *build_tce_table( struct TceTable * tbl )
static struct TceTable *build_tce_table( struct TceTable * tbl )
{
unsigned long bits, bytes, totalBytes;
unsigned long numBits[NUM_TCE_LEVELS], numBytes[NUM_TCE_LEVELS];
......@@ -518,7 +520,7 @@ static long test_tce_range( struct TceTable *tbl, long tcenum, unsigned order )
return retval;
}
inline dma_addr_t get_tces( struct TceTable *tbl, unsigned order, void *page, unsigned numPages, int direction )
static inline dma_addr_t get_tces( struct TceTable *tbl, unsigned order, void *page, unsigned numPages, int direction )
{
long tcenum;
unsigned long uaddr;
......@@ -581,7 +583,7 @@ static void tce_free_one_pSeries( struct TceTable *tbl, long tcenum )
}
#endif
void tce_free(struct TceTable *tbl, dma_addr_t dma_addr,
static void tce_free(struct TceTable *tbl, dma_addr_t dma_addr,
unsigned order, unsigned num_pages)
{
long tcenum, total_tces, free_tce;
......@@ -701,6 +703,7 @@ void create_tce_tables_for_buses(struct list_head *bus_list)
}
}
#ifdef CONFIG_PPC_PSERIES
void create_tce_tables_for_busesLP(struct list_head *bus_list)
{
struct list_head *ln;
......@@ -722,15 +725,19 @@ void create_tce_tables_for_busesLP(struct list_head *bus_list)
create_tce_tables_for_busesLP(&bus->children);
}
}
#endif
void create_tce_tables(void) {
struct pci_dev *dev = NULL;
struct device_node *dn, *mydn;
#ifdef CONFIG_PPC_PSERIES
if (systemcfg->platform == PLATFORM_PSERIES_LPAR) {
create_tce_tables_for_busesLP(&pci_root_buses);
}
else {
else
#endif
{
create_tce_tables_for_buses(&pci_root_buses);
}
/* Now copy the tce_table ptr from the bus devices down to every
......@@ -884,6 +891,7 @@ static void getTceTableParmsiSeries(struct iSeries_Device_Node* DevNode,
static void getTceTableParmsPSeries(struct pci_controller *phb,
struct device_node *dn,
struct TceTable *newTceTable ) {
#ifdef CONFIG_PPC_PSERIES
phandle node;
unsigned long i;
......@@ -953,6 +961,7 @@ static void getTceTableParmsPSeries(struct pci_controller *phb,
}
i++;
}
#endif
}
/*
......@@ -970,6 +979,7 @@ static void getTceTableParmsPSeries(struct pci_controller *phb,
static void getTceTableParmsPSeriesLP(struct pci_controller *phb,
struct device_node *dn,
struct TceTable *newTceTable ) {
#ifdef CONFIG_PPC_PSERIES
u32 *dma_window = (u32 *)get_property(dn, "ibm,dma-window", 0);
if (!dma_window) {
panic("PCI_DMA: getTceTableParmsPSeriesLP: device %s has no ibm,dma-window property!\n", dn->full_name);
......@@ -985,6 +995,7 @@ static void getTceTableParmsPSeriesLP(struct pci_controller *phb,
PPCDBG(PPCDBG_TCEINIT, "\tnewTceTable->index = 0x%lx\n", newTceTable->index);
PPCDBG(PPCDBG_TCEINIT, "\tnewTceTable->startOffset = 0x%lx\n", newTceTable->startOffset);
PPCDBG(PPCDBG_TCEINIT, "\tnewTceTable->size = 0x%lx\n", newTceTable->size);
#endif
}
/* Allocates a contiguous real buffer and creates TCEs over it.
......
......@@ -46,6 +46,7 @@
static void * __init
update_dn_pci_info(struct device_node *dn, void *data)
{
#ifdef CONFIG_PPC_PSERIES
struct pci_controller *phb = (struct pci_controller *)data;
u32 *regs;
char *device_type = get_property(dn, "device_type", 0);
......@@ -64,6 +65,7 @@ update_dn_pci_info(struct device_node *dn, void *data)
dn->devfn = (regs[0] >> 8) & 0xff;
}
}
#endif
return NULL;
}
......@@ -97,6 +99,7 @@ void *traverse_pci_devices(struct device_node *start, traverse_func pre, travers
return ret;
for (dn = start->child; dn; dn = nextdn) {
nextdn = NULL;
#ifdef CONFIG_PPC_PSERIES
if (get_property(dn, "class-code", 0)) {
if (pre && (ret = pre(dn, data)) != NULL)
return ret;
......@@ -112,6 +115,7 @@ void *traverse_pci_devices(struct device_node *start, traverse_func pre, travers
post(dn, data);
}
}
#endif
if (!nextdn) {
/* Walk up to next valid sibling. */
do {
......
......@@ -170,15 +170,15 @@ EXPORT_SYMBOL(flush_icache_user_range);
EXPORT_SYMBOL(flush_dcache_page);
#ifdef CONFIG_SMP
#ifdef CONFIG_PPC_ISERIES
EXPORT_SYMBOL(__no_use_restore_flags);
EXPORT_SYMBOL(__no_use_save_flags);
EXPORT_SYMBOL(__no_use_sti);
EXPORT_SYMBOL(__no_use_cli);
EXPORT_SYMBOL(local_get_flags);
EXPORT_SYMBOL(local_irq_disable);
EXPORT_SYMBOL(local_irq_restore);
#endif
#endif
EXPORT_SYMBOL(ppc_md);
#ifdef CONFIG_PPC_PSERIES
EXPORT_SYMBOL(find_devices);
EXPORT_SYMBOL(find_type_devices);
EXPORT_SYMBOL(find_compatible_devices);
......@@ -187,6 +187,7 @@ EXPORT_SYMBOL(device_is_compatible);
EXPORT_SYMBOL(machine_is_compatible);
EXPORT_SYMBOL(find_all_nodes);
EXPORT_SYMBOL(get_property);
#endif
EXPORT_SYMBOL_NOVERS(memcpy);
......
......@@ -80,8 +80,8 @@ int proc_pmc_set_pmc6( struct file *file, const char *buffer, unsigned long cou
int proc_pmc_set_pmc7( struct file *file, const char *buffer, unsigned long count, void *data);
int proc_pmc_set_pmc8( struct file *file, const char *buffer, unsigned long count, void *data);
void proc_ppc64_init(void)
#if 0
int proc_ppc64_init(void)
{
unsigned long i;
struct proc_dir_entry *ent = NULL;
......@@ -184,6 +184,7 @@ void proc_ppc64_init(void)
ent->write_proc = NULL;
}
}
#endif
/*
* Find the requested 'file' given a proc token.
......
......@@ -55,6 +55,7 @@ static struct file_operations page_map_fops = {
.mmap = page_map_mmap
};
#ifdef CONFIG_PPC_PSERIES
/* routines for /proc/ppc64/ofdt */
static ssize_t ofdt_write(struct file *, const char __user *, size_t, loff_t *);
static void proc_ppc64_create_ofdt(struct proc_dir_entry *);
......@@ -66,6 +67,7 @@ static char * parse_next_property(char *, char *, char **, int *, unsigned char*
static struct file_operations ofdt_fops = {
.write = ofdt_write
};
#endif
int __init proc_ppc64_init(void)
{
......@@ -108,6 +110,7 @@ int __init proc_ppc64_init(void)
}
}
#ifdef CONFIG_PPC_PSERIES
/* Placeholder for rtas interfaces. */
if (proc_ppc64.rtas == NULL)
proc_ppc64.rtas = proc_mkdir("rtas", proc_ppc64.root);
......@@ -116,6 +119,7 @@ int __init proc_ppc64_init(void)
proc_symlink("rtas", 0, "ppc64/rtas");
proc_ppc64_create_ofdt(proc_ppc64.root);
#endif
return 0;
}
......@@ -197,6 +201,7 @@ static int page_map_mmap( struct file *file, struct vm_area_struct *vma )
return 0;
}
#ifdef CONFIG_PPC_PSERIES
/* create /proc/ppc64/ofdt write-only by root */
static void proc_ppc64_create_ofdt(struct proc_dir_entry *parent)
{
......@@ -417,5 +422,6 @@ static void release_prop_list(const struct property *prop)
}
}
#endif /* defined(CONFIG_PPC_PSERIES) */
fs_initcall(proc_ppc64_init);
......@@ -188,6 +188,7 @@ void setup_system(unsigned long r3, unsigned long r4, unsigned long r5,
#endif
}
#ifdef CONFIG_PPC_PSERIES
if (systemcfg->platform & PLATFORM_PSERIES) {
early_console_initialized = 1;
register_console(&udbg_console);
......@@ -209,6 +210,7 @@ void setup_system(unsigned long r3, unsigned long r4, unsigned long r5,
}
#endif
}
#endif
printk("Starting Linux PPC64 %s\n", UTS_RELEASE);
......@@ -228,7 +230,7 @@ void setup_system(unsigned long r3, unsigned long r4, unsigned long r5,
mm_init_ppc64();
#ifdef CONFIG_SMP
#if defined(CONFIG_SMP) && defined(CONFIG_PPC_PSERIES)
if (cur_cpu_spec->firmware_features & FW_FEATURE_SPLPAR) {
vpa_init(boot_cpuid);
}
......@@ -310,6 +312,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
seq_printf(m, "\n");
#ifdef CONFIG_PPC_PSERIES
/*
* Assume here that all clock rates are the same in a
* smp system. -- Cort
......@@ -328,6 +331,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
of_node_put(cpu_node);
}
}
#endif
if (ppc_md.setup_residual != NULL)
ppc_md.setup_residual(m, cpu_id);
......@@ -362,9 +366,6 @@ struct seq_operations cpuinfo_op = {
void parse_cmd_line(unsigned long r3, unsigned long r4, unsigned long r5,
unsigned long r6, unsigned long r7)
{
struct device_node *chosen;
char *p;
#ifdef CONFIG_BLK_DEV_INITRD
if ((initrd_start == 0) && r3 && r4 && r4 != 0xdeadbeef) {
initrd_start = (r3 >= KERNELBASE) ? r3 : (unsigned long)__va(r3);
......@@ -380,13 +381,20 @@ void parse_cmd_line(unsigned long r3, unsigned long r4, unsigned long r5,
strlcpy(cmd_line, CONFIG_CMDLINE, sizeof(cmd_line));
#endif /* CONFIG_CMDLINE */
#ifdef CONFIG_PPC_PSERIES
{
struct device_node *chosen;
chosen = of_find_node_by_name(NULL, "chosen");
if (chosen != NULL) {
char *p;
p = get_property(chosen, "bootargs", NULL);
if (p != NULL && p[0] != 0)
strlcpy(cmd_line, p, sizeof(cmd_line));
of_node_put(chosen);
}
}
#endif
/* Look for mem= option on command line */
if (strstr(cmd_line, "mem=")) {
......@@ -412,28 +420,7 @@ void parse_cmd_line(unsigned long r3, unsigned long r4, unsigned long r5,
}
char *bi_tag2str(unsigned long tag)
{
switch (tag) {
case BI_FIRST:
return "BI_FIRST";
case BI_LAST:
return "BI_LAST";
case BI_CMD_LINE:
return "BI_CMD_LINE";
case BI_BOOTLOADER_ID:
return "BI_BOOTLOADER_ID";
case BI_INITRD:
return "BI_INITRD";
case BI_SYSMAP:
return "BI_SYSMAP";
case BI_MACHTYPE:
return "BI_MACHTYPE";
default:
return "BI_UNKNOWN";
}
}
#ifdef CONFIG_PPC_PSERIES
int parse_bootinfo(void)
{
struct bi_record *rec;
......@@ -467,6 +454,7 @@ int parse_bootinfo(void)
return 0;
}
#endif
int __init ppc_init(void)
{
......
......@@ -141,7 +141,7 @@ static int smp_iSeries_probe(void)
for (i=0; i < NR_CPUS; ++i) {
lpPaca = paca[i].xLpPacaPtr;
if (lpPaca->xDynProcStatus < 2) {
paca[i].active = 1;
/*paca[i].active = 1;*/
++np;
}
}
......@@ -187,7 +187,6 @@ void __init smp_init_iSeries(void)
smp_ops->probe = smp_iSeries_probe;
smp_ops->kick_cpu = smp_iSeries_kick_cpu;
smp_ops->setup_cpu = smp_iSeries_setup_cpu;
#warning fix for iseries
systemcfg->processorCount = smp_iSeries_numProcs();
}
#endif
......@@ -689,9 +688,11 @@ int __devinit start_secondary(void *unused)
get_paca()->yielded = 0;
#ifdef CONFIG_PPC_PSERIES
if (cur_cpu_spec->firmware_features & FW_FEATURE_SPLPAR) {
vpa_init(cpu);
}
#endif
local_irq_enable();
......
......@@ -16,6 +16,7 @@
* This file handles the architecture-dependent parts of hardware exceptions
*/
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
......@@ -26,7 +27,6 @@
#include <linux/user.h>
#include <linux/a.out.h>
#include <linux/interrupt.h>
#include <linux/config.h>
#include <linux/init.h>
#include <linux/module.h>
......@@ -40,8 +40,10 @@
extern int fix_alignment(struct pt_regs *);
extern void bad_page_fault(struct pt_regs *, unsigned long, int);
#ifdef CONFIG_PPC_PSERIES
/* This is true if we are using the firmware NMI handler (typically LPAR) */
extern int fwnmi_active;
#endif
#ifdef CONFIG_DEBUG_KERNEL
void (*debugger)(struct pt_regs *regs);
......@@ -96,6 +98,7 @@ _exception(int signr, siginfo_t *info, struct pt_regs *regs)
force_sig_info(signr, info, current);
}
#ifdef CONFIG_PPC_PSERIES
/* Get the error information for errors coming through the
* FWNMI vectors. The pt_regs' r3 will be updated to reflect
* the actual r3 if possible, and a ptr to the error log entry
......@@ -128,10 +131,12 @@ static void FWNMI_release_errinfo(void)
if (ret != 0)
printk("FWNMI: nmi-interlock failed: %ld\n", ret);
}
#endif
void
SystemResetException(struct pt_regs *regs)
{
#ifdef CONFIG_PPC_PSERIES
if (fwnmi_active) {
struct rtas_error_log *errhdr = FWNMI_get_errinfo(regs);
if (errhdr) {
......@@ -139,6 +144,7 @@ SystemResetException(struct pt_regs *regs)
}
FWNMI_release_errinfo();
}
#endif
#ifdef CONFIG_DEBUG_KERNEL
if (debugger)
......@@ -154,6 +160,7 @@ SystemResetException(struct pt_regs *regs)
/* What should we do here? We could issue a shutdown or hard reset. */
}
#ifdef CONFIG_PPC_PSERIES
/*
* See if we can recover from a machine check exception.
* This is only called on power4 (or above) and only via
......@@ -190,6 +197,7 @@ static int recover_mce(struct pt_regs *regs, struct rtas_error_log err)
}
return 0;
}
#endif
/*
* Handle a machine check.
......@@ -207,6 +215,7 @@ static int recover_mce(struct pt_regs *regs, struct rtas_error_log err)
void
MachineCheckException(struct pt_regs *regs)
{
#ifdef CONFIG_PPC_PSERIES
struct rtas_error_log err, *errp;
if (fwnmi_active) {
......@@ -217,6 +226,7 @@ MachineCheckException(struct pt_regs *regs)
if (errp && recover_mce(regs, err))
return;
}
#endif
#ifdef CONFIG_DEBUG_KERNEL
if (debugger_fault_handler) {
......
/* -*- linux-c -*-
* arch/ppc64/kernel/viopath.c
*
* iSeries Virtual I/O Message Path code
*
* Authors: Dave Boutcher <boutcher@us.ibm.com>
* Ryan Arnold <ryanarn@us.ibm.com>
* Colin Devilbiss <devilbis@us.ibm.com>
*
* (C) Copyright 2000-2003 IBM Corporation
*
* This code is used by the iSeries virtual disk, cd,
* tape, and console to communicate with OS/400 in another
* partition.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/config.h>
#include <asm/uaccess.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/proc_fs.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <asm/hardirq.h> /* for is_atomic */
#include <asm/iSeries/LparData.h>
#include <asm/iSeries/HvLpEvent.h>
#include <asm/iSeries/HvLpConfig.h>
#include <asm/iSeries/HvCallCfg.h>
#include <asm/iSeries/mf.h>
#include <asm/iSeries/iSeries_proc.h>
#include <asm/iSeries/vio.h>
extern struct pci_dev *iSeries_vio_dev;
/* Status of the path to each other partition in the system.
* This is overkill, since we will only ever establish connections
* to our hosting partition and the primary partition on the system.
* But this allows for other support in the future.
*/
static struct viopathStatus {
int isOpen:1; /* Did we open the path? */
int isActive:1; /* Do we have a mon msg outstanding */
int users[VIO_MAX_SUBTYPES];
HvLpInstanceId mSourceInst;
HvLpInstanceId mTargetInst;
int numberAllocated;
} viopathStatus[HVMAXARCHITECTEDLPS];
static spinlock_t statuslock = SPIN_LOCK_UNLOCKED;
/*
* For each kind of event we allocate a buffer that is
* guaranteed not to cross a page boundary
*/
static unsigned char event_buffer[VIO_MAX_SUBTYPES * 256] __page_aligned;
static atomic_t event_buffer_available[VIO_MAX_SUBTYPES];
static int event_buffer_initialised;
static void handleMonitorEvent(struct HvLpEvent *event);
/*
* We use this structure to handle asynchronous responses. The caller
* blocks on the semaphore and the handler posts the semaphore. However,
* if in_atomic() is true in the caller, then wait_atomic is used ...
*/
struct doneAllocParms_t {
struct semaphore *sem;
int number;
volatile unsigned long *wait_atomic;
int used_wait_atomic;
};
/* Put a sequence number in each mon msg. The value is not
* important. Start at something other than 0 just for
* readability. wrapping this is ok.
*/
static u8 viomonseq = 22;
/* Our hosting logical partition. We get this at startup
* time, and different modules access this variable directly.
*/
HvLpIndex viopath_hostLp = 0xff; /* HvLpIndexInvalid */
EXPORT_SYMBOL(viopath_hostLp);
HvLpIndex viopath_ourLp = 0xff;
EXPORT_SYMBOL(viopath_ourLp);
/* For each kind of incoming event we set a pointer to a
* routine to call.
*/
static vio_event_handler_t *vio_handler[VIO_MAX_SUBTYPES];
/*
 * Translate one EBCDIC (code page 037) character to ASCII.
 * Only digits and upper-case letters are mapped; anything else
 * (including lower-case letters) comes back as a space.
 *
 * EBCDIC places 0-9 and the three alphabet runs A-I, J-R, S-Z in
 * contiguous ranges, so range arithmetic replaces a 36-way switch.
 */
static unsigned char e2a(unsigned char x)
{
	if ((x >= 0xF0) && (x <= 0xF9))		/* digits 0-9 */
		return '0' + (x - 0xF0);
	if ((x >= 0xC1) && (x <= 0xC9))		/* letters A-I */
		return 'A' + (x - 0xC1);
	if ((x >= 0xD1) && (x <= 0xD9))		/* letters J-R */
		return 'J' + (x - 0xD1);
	if ((x >= 0xE2) && (x <= 0xE9))		/* letters S-Z */
		return 'S' + (x - 0xE2);
	return ' ';
}
/* Handle reads from the proc file system
*/
static int proc_read(char *buf, char **start, off_t offset,
		     int blen, int *eof, void *data)
{
	HvLpEvent_Rc hvrc;
	/* Completion semaphore; posted by handleConfig() when the
	 * hosting partition acks our config-get event. */
	DECLARE_MUTEX_LOCKED(Semaphore);
	/* Map the proc buffer for DMA so OS/400 can fill it directly. */
	dma_addr_t dmaa =
	    pci_map_single(iSeries_vio_dev, buf, PAGE_SIZE,
			   PCI_DMA_FROMDEVICE);
	int len = PAGE_SIZE;

	if (len > blen)
		len = blen;

	/* Zero-fill so the strlen()-append trick below always finds a
	 * terminator after whatever the host wrote. */
	memset(buf, 0x00, len);
	/* Ask the hosting partition for config data; the semaphore
	 * address rides along as the correlation token so the ack
	 * handler can wake us. */
	hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
			HvLpEvent_Type_VirtualIo,
			viomajorsubtype_config | vioconfigget,
			HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck,
			viopath_sourceinst(viopath_hostLp),
			viopath_targetinst(viopath_hostLp),
			(u64)(unsigned long)&Semaphore, VIOVERSION << 16,
			((u64)dmaa) << 32, len, 0, 0);
	if (hvrc != HvLpEvent_Rc_Good)
		printk("viopath hv error on op %d\n", (int) hvrc);

	/* Block until handleConfig() posts the semaphore. */
	down(&Semaphore);

	pci_unmap_single(iSeries_vio_dev, dmaa, PAGE_SIZE,
			 PCI_DMA_FROMDEVICE);

	/* Append the serial number; each assignment overwrites the
	 * current NUL terminator (buffer was zeroed above).
	 * NOTE(review): assumes xItExtVpdPanel holds EBCDIC VPD data
	 * with serial in systemSerial[1..5] - confirm against LparData. */
	sprintf(buf + strlen(buf), "SRLNBR=");
	buf[strlen(buf)] = e2a(xItExtVpdPanel.mfgID[2]);
	buf[strlen(buf)] = e2a(xItExtVpdPanel.mfgID[3]);
	buf[strlen(buf)] = e2a(xItExtVpdPanel.systemSerial[1]);
	buf[strlen(buf)] = e2a(xItExtVpdPanel.systemSerial[2]);
	buf[strlen(buf)] = e2a(xItExtVpdPanel.systemSerial[3]);
	buf[strlen(buf)] = e2a(xItExtVpdPanel.systemSerial[4]);
	buf[strlen(buf)] = e2a(xItExtVpdPanel.systemSerial[5]);
	buf[strlen(buf)] = '\n';
	*eof = 1;
	return strlen(buf);
}
/* Handle writes to our proc file system
*/
static int proc_write(struct file *file, const char *buffer,
		      unsigned long count, void *data)
{
	/* Doesn't do anything today!!!
	 */
	/* Claim the whole write so callers don't see a short write. */
	return count;
}
/* setup our proc file system entries
*/
/*
 * Register our "config" entry under the iSeries proc directory.
 * Read-only for root; reads go through proc_read(), writes are
 * accepted and discarded by proc_write().
 */
static void vio_proc_init(struct proc_dir_entry *iSeries_proc)
{
	struct proc_dir_entry *ent =
		create_proc_entry("config", S_IFREG | S_IRUSR, iSeries_proc);

	if (ent == NULL)
		return;		/* proc entry creation failed; nothing to set up */

	ent->nlink = 1;
	ent->data = NULL;
	ent->read_proc = proc_read;
	ent->write_proc = proc_write;
}
/* See if a given LP is active. Allow for invalid lps to be passed in
* and just return invalid
*/
/*
 * Report whether the virtual I/O path to partition lp is active.
 * Invalid or out-of-range indices simply report "not active" rather
 * than indexing outside viopathStatus[].
 */
int viopath_isactive(HvLpIndex lp)
{
	if ((lp == HvLpIndexInvalid) || (lp >= HVMAXARCHITECTEDLPS))
		return 0;
	return viopathStatus[lp].isActive;
}
EXPORT_SYMBOL(viopath_isactive);
/*
* We cache the source and target instance ids for each
* partition.
*/
/* Return the cached source LP instance id for partition lp (set by
 * sendMonMsg()/viopath_open()).  No range checking is done here. */
HvLpInstanceId viopath_sourceinst(HvLpIndex lp)
{
	return viopathStatus[lp].mSourceInst;
}
EXPORT_SYMBOL(viopath_sourceinst);
/* Return the cached target LP instance id for partition lp (set by
 * sendMonMsg()/viopath_open()).  No range checking is done here. */
HvLpInstanceId viopath_targetinst(HvLpIndex lp)
{
	return viopathStatus[lp].mTargetInst;
}
EXPORT_SYMBOL(viopath_targetinst);
/*
* Send a monitor message. This is a message with the acknowledge
* bit on that the other side will NOT explicitly acknowledge. When
* the other side goes down, the hypervisor will acknowledge any
* outstanding messages....so we will know when the other side dies.
*/
static void sendMonMsg(HvLpIndex remoteLp)
{
	HvLpEvent_Rc hvrc;

	/* Refresh the cached instance ids each time we (re)establish
	 * the monitor connection - they change if the other side
	 * restarted. */
	viopathStatus[remoteLp].mSourceInst =
		HvCallEvent_getSourceLpInstanceId(remoteLp,
				HvLpEvent_Type_VirtualIo);
	viopathStatus[remoteLp].mTargetInst =
		HvCallEvent_getTargetLpInstanceId(remoteLp,
				HvLpEvent_Type_VirtualIo);

	/*
	 * Deliberately ignore the return code here.  if we call this
	 * more than once, we don't care.
	 */
	vio_setHandler(viomajorsubtype_monitor, handleMonitorEvent);

	/* DeferredAck: the hypervisor acks this for us only when the
	 * remote partition dies, which is how handleMonitorEvent()
	 * learns of its death. */
	hvrc = HvCallEvent_signalLpEventFast(remoteLp, HvLpEvent_Type_VirtualIo,
			viomajorsubtype_monitor, HvLpEvent_AckInd_DoAck,
			HvLpEvent_AckType_DeferredAck,
			viopathStatus[remoteLp].mSourceInst,
			viopathStatus[remoteLp].mTargetInst,
			viomonseq++, 0, 0, 0, 0, 0);

	if (hvrc == HvLpEvent_Rc_Good)
		viopathStatus[remoteLp].isActive = 1;
	else {
		printk(KERN_WARNING_VIO "could not connect to partition %d\n",
		       remoteLp);
		viopathStatus[remoteLp].isActive = 0;
	}
}
/*
 * Handler for monitor-subtype LP events.  An interrupt-type event is
 * the other side opening/refreshing the monitor connection; an
 * acknowledgement means the hypervisor acked our deferred-ack monitor
 * message, i.e. the other partition died.
 */
static void handleMonitorEvent(struct HvLpEvent *event)
{
	HvLpIndex remoteLp;
	int i;

	/*
	 * This handler is _also_ called as part of the loop
	 * at the end of this routine, so it must be able to
	 * ignore NULL events...
	 */
	if (!event)
		return;

	/*
	 * First see if this is just a normal monitor message from the
	 * other partition
	 */
	if (event->xFlags.xFunction == HvLpEvent_Function_Int) {
		remoteLp = event->xSourceLp;
		if (!viopathStatus[remoteLp].isActive)
			sendMonMsg(remoteLp);
		return;
	}

	/*
	 * This path is for an acknowledgement; the other partition
	 * died
	 */
	remoteLp = event->xTargetLp;
	/* Stale acks from a previous incarnation of the connection are
	 * ignored. */
	if ((event->xSourceInstanceId != viopathStatus[remoteLp].mSourceInst) ||
	    (event->xTargetInstanceId != viopathStatus[remoteLp].mTargetInst)) {
		printk(KERN_WARNING_VIO "ignoring ack....mismatched instances\n");
		return;
	}

	printk(KERN_WARNING_VIO "partition %d ended\n", remoteLp);

	viopathStatus[remoteLp].isActive = 0;

	/*
	 * For each active handler, pass them a NULL
	 * message to indicate that the other partition
	 * died
	 */
	for (i = 0; i < VIO_MAX_SUBTYPES; i++) {
		if (vio_handler[i] != NULL)
			(*vio_handler[i])(NULL);
	}
}
/*
 * Install beh as the handler for a virtual I/O event subtype.
 * Returns 0 on success, -EINVAL for an out-of-range subtype, or
 * -EBUSY if a handler is already registered for that slot.
 */
int vio_setHandler(int subtype, vio_event_handler_t *beh)
{
	int idx = subtype >> VIOMAJOR_SUBTYPE_SHIFT;

	if ((idx < 0) || (idx >= VIO_MAX_SUBTYPES))
		return -EINVAL;
	if (vio_handler[idx] != NULL)
		return -EBUSY;

	vio_handler[idx] = beh;
	return 0;
}
EXPORT_SYMBOL(vio_setHandler);
/*
 * Remove the handler for a virtual I/O event subtype.
 * Returns 0 on success, -EINVAL for an out-of-range subtype, or
 * -EAGAIN if no handler was registered for that slot.
 */
int vio_clearHandler(int subtype)
{
	int idx = subtype >> VIOMAJOR_SUBTYPE_SHIFT;

	if ((idx < 0) || (idx >= VIO_MAX_SUBTYPES))
		return -EINVAL;
	if (vio_handler[idx] == NULL)
		return -EAGAIN;

	vio_handler[idx] = NULL;
	return 0;
}
EXPORT_SYMBOL(vio_clearHandler);
/*
 * Handler for config-subtype LP events.  We only ever *send* config
 * requests (see proc_read()), so an incoming interrupt-type event is
 * unexpected and gets rejected; an acknowledgement carries the
 * requester's semaphore in xCorrelationToken and simply wakes it.
 */
static void handleConfig(struct HvLpEvent *event)
{
	if (!event)
		return;
	if (event->xFlags.xFunction == HvLpEvent_Function_Int) {
		printk(KERN_WARNING_VIO
		       "unexpected config request from partition %d",
		       event->xSourceLp);

		/*
		 * The original code re-tested xFunction here, which is
		 * always HvLpEvent_Function_Int inside this branch;
		 * only the AckInd check is meaningful.
		 */
		if (event->xFlags.xAckInd == HvLpEvent_AckInd_DoAck) {
			event->xRc = HvLpEvent_Rc_InvalidSubtype;
			HvCallEvent_ackLpEvent(event);
		}
		return;
	}

	/* Ack path: wake the thread blocked in proc_read(). */
	up((struct semaphore *)event->xCorrelationToken);
}
/*
* Initialization of the hosting partition
*/
void vio_set_hostlp(void)
{
	/*
	 * If this has already been set then we DON'T want to either change
	 * it or re-register the proc file system
	 */
	if (viopath_hostLp != HvLpIndexInvalid)
		return;

	/*
	 * Figure out our hosting partition.  This isn't allowed to change
	 * while we're active
	 */
	viopath_ourLp = HvLpConfig_getLpIndex();
	viopath_hostLp = HvCallCfg_getHostingLpIndex(viopath_ourLp);

	/* No valid hosting LP: nothing to register. */
	if (viopath_hostLp == HvLpIndexInvalid)
		return;

	/* We have a host: publish our proc config entry and start
	 * accepting config acks. */
	iSeries_proc_callback(&vio_proc_init);
	vio_setHandler(viomajorsubtype_config, handleConfig);
}
EXPORT_SYMBOL(vio_set_hostlp);
/*
 * Top-level dispatcher for all virtual I/O LP events.  Validates that
 * the event's instance ids match our cached ones (rejecting traffic
 * from stale or unauthorized connections), then hands the event to
 * the registered per-subtype handler.
 */
static void vio_handleEvent(struct HvLpEvent *event, struct pt_regs *regs)
{
	HvLpIndex remoteLp;
	int subtype = (event->xSubtype & VIOMAJOR_SUBTYPE_MASK)
		>> VIOMAJOR_SUBTYPE_SHIFT;

	if (event->xFlags.xFunction == HvLpEvent_Function_Int) {
		remoteLp = event->xSourceLp;
		/*
		 * The isActive is checked because if the hosting partition
		 * went down and came back up it would not be active but it
		 * would have different source and target instances, in which
		 * case we'd want to reset them.  This case really protects
		 * against an unauthorized active partition sending interrupts
		 * or acks to this linux partition.
		 */
		if (viopathStatus[remoteLp].isActive
		    && (event->xSourceInstanceId !=
			viopathStatus[remoteLp].mTargetInst)) {
			printk(KERN_WARNING_VIO
			       "message from invalid partition. "
			       "int msg rcvd, source inst (%d) doesnt match (%d)\n",
			       viopathStatus[remoteLp].mTargetInst,
			       event->xSourceInstanceId);
			return;
		}

		if (viopathStatus[remoteLp].isActive
		    && (event->xTargetInstanceId !=
			viopathStatus[remoteLp].mSourceInst)) {
			printk(KERN_WARNING_VIO
			       "message from invalid partition. "
			       "int msg rcvd, target inst (%d) doesnt match (%d)\n",
			       viopathStatus[remoteLp].mSourceInst,
			       event->xTargetInstanceId);
			return;
		}
	} else {
		/* Acknowledgement path: instance ids must match exactly. */
		remoteLp = event->xTargetLp;
		if (event->xSourceInstanceId !=
		    viopathStatus[remoteLp].mSourceInst) {
			printk(KERN_WARNING_VIO
			       "message from invalid partition. "
			       "ack msg rcvd, source inst (%d) doesnt match (%d)\n",
			       viopathStatus[remoteLp].mSourceInst,
			       event->xSourceInstanceId);
			return;
		}

		if (event->xTargetInstanceId !=
		    viopathStatus[remoteLp].mTargetInst) {
			printk(KERN_WARNING_VIO
			       "message from invalid partition. "
			       "viopath: ack msg rcvd, target inst (%d) doesnt match (%d)\n",
			       viopathStatus[remoteLp].mTargetInst,
			       event->xTargetInstanceId);
			return;
		}
	}

	if (vio_handler[subtype] == NULL) {
		printk(KERN_WARNING_VIO
		       "unexpected virtual io event subtype %d from partition %d\n",
		       event->xSubtype, remoteLp);
		/* No handler. Ack if necessary */
		if ((event->xFlags.xFunction == HvLpEvent_Function_Int) &&
		    (event->xFlags.xAckInd == HvLpEvent_AckInd_DoAck)) {
			event->xRc = HvLpEvent_Rc_InvalidSubtype;
			HvCallEvent_ackLpEvent(event);
		}
		return;
	}

	/* This innocuous little line is where all the real work happens */
	(*vio_handler[subtype])(event);
}
/*
 * Completion callback for LP-event (de)allocation requests.  Records
 * how many events were handled, then wakes the requester: either by
 * posting its semaphore, or - when the request was made from atomic
 * context - by clearing the flag it is busy-waiting on.
 */
static void viopath_donealloc(void *parm, int number)
{
	struct doneAllocParms_t *dp = (struct doneAllocParms_t *)parm;

	dp->number = number;
	if (!dp->used_wait_atomic)
		up(dp->sem);
	else
		*(dp->wait_atomic) = 0;
}
/*
 * Ask mf_allocateLpEvents() for numEvents virtual I/O LP events on
 * the path to remoteLp and wait for the completion callback
 * (viopath_donealloc).  Returns the number actually allocated.
 *
 * When called from atomic context we must not sleep on the semaphore,
 * so we spin on a flag the callback clears instead.
 */
static int allocateEvents(HvLpIndex remoteLp, int numEvents)
{
	struct doneAllocParms_t parms;
	DECLARE_MUTEX_LOCKED(Semaphore);
	volatile unsigned long wait_atomic = 1;

	if (in_atomic()) {
		parms.used_wait_atomic = 1;
		parms.wait_atomic = &wait_atomic;
	} else {
		parms.used_wait_atomic = 0;
		parms.sem = &Semaphore;
	}

	mf_allocateLpEvents(remoteLp, HvLpEvent_Type_VirtualIo, 250,	/* It would be nice to put a real number here! */
			    numEvents, &viopath_donealloc, &parms);

	if (in_atomic()) {
		/* Busy-wait until the callback clears the flag. */
		while (wait_atomic)
			mb();
	} else
		down(&Semaphore);

	return parms.number;
}
/*
 * Open (or add a user to) the virtual I/O path to remoteLp for the
 * given subtype, pre-allocating numReq LP events for it.  The first
 * opener also opens the hypervisor event path, registers the event
 * handler and starts the monitor connection.
 *
 * Returns 0 on success, -EINVAL for bad arguments, -ENOMEM if no
 * events could be allocated for a newly opened path.
 */
int viopath_open(HvLpIndex remoteLp, int subtype, int numReq)
{
	int i;
	unsigned long flags;
	int tempNumAllocated;

	if ((remoteLp >= HvMaxArchitectedLps) || (remoteLp == HvLpIndexInvalid))
		return -EINVAL;

	subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
	if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES))
		return -EINVAL;

	spin_lock_irqsave(&statuslock, flags);

	/* Lazily mark all per-subtype event buffers as available on
	 * the first open anywhere. */
	if (!event_buffer_initialised) {
		for (i = 0; i < VIO_MAX_SUBTYPES; i++)
			atomic_set(&event_buffer_available[i], 1);
		event_buffer_initialised = 1;
	}

	viopathStatus[remoteLp].users[subtype]++;

	if (!viopathStatus[remoteLp].isOpen) {
		viopathStatus[remoteLp].isOpen = 1;

		HvCallEvent_openLpEventPath(remoteLp, HvLpEvent_Type_VirtualIo);

		/*
		 * Don't hold the spinlock during an operation that
		 * can sleep.
		 */
		spin_unlock_irqrestore(&statuslock, flags);
		tempNumAllocated = allocateEvents(remoteLp, 1);
		spin_lock_irqsave(&statuslock, flags);

		viopathStatus[remoteLp].numberAllocated += tempNumAllocated;

		/* Couldn't get even one event: close the path again
		 * and fail the open. */
		if (viopathStatus[remoteLp].numberAllocated == 0) {
			HvCallEvent_closeLpEventPath(remoteLp,
					HvLpEvent_Type_VirtualIo);

			spin_unlock_irqrestore(&statuslock, flags);
			return -ENOMEM;
		}

		viopathStatus[remoteLp].mSourceInst =
			HvCallEvent_getSourceLpInstanceId(remoteLp,
					HvLpEvent_Type_VirtualIo);
		viopathStatus[remoteLp].mTargetInst =
			HvCallEvent_getTargetLpInstanceId(remoteLp,
					HvLpEvent_Type_VirtualIo);
		HvLpEvent_registerHandler(HvLpEvent_Type_VirtualIo,
				&vio_handleEvent);
		sendMonMsg(remoteLp);
		printk(KERN_INFO_VIO
			"Opening connection to partition %d, setting sinst %d, tinst %d\n",
			remoteLp, viopathStatus[remoteLp].mSourceInst,
			viopathStatus[remoteLp].mTargetInst);
	}

	/* Allocate the events this caller asked for (again, the lock
	 * is dropped because the allocation can sleep). */
	spin_unlock_irqrestore(&statuslock, flags);
	tempNumAllocated = allocateEvents(remoteLp, numReq);
	spin_lock_irqsave(&statuslock, flags);
	viopathStatus[remoteLp].numberAllocated += tempNumAllocated;
	spin_unlock_irqrestore(&statuslock, flags);

	return 0;
}
EXPORT_SYMBOL(viopath_open);
/*
 * Drop one user of the given subtype on the path to remoteLp and give
 * back numReq LP events.  The last user closes the hypervisor event
 * path and marks the per-subtype event buffers unavailable.
 *
 * Returns 0 on success or -EINVAL for bad arguments.
 */
int viopath_close(HvLpIndex remoteLp, int subtype, int numReq)
{
	unsigned long flags;
	int i;
	int numOpen;
	struct doneAllocParms_t doneAllocParms;
	DECLARE_MUTEX_LOCKED(Semaphore);

	if ((remoteLp >= HvMaxArchitectedLps) || (remoteLp == HvLpIndexInvalid))
		return -EINVAL;

	subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
	if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES))
		return -EINVAL;

	spin_lock_irqsave(&statuslock, flags);
	/*
	 * If the viopath_close somehow gets called before a
	 * viopath_open it could decrement to -1 which is a non
	 * recoverable state so we'll prevent this from
	 * happening.
	 */
	if (viopathStatus[remoteLp].users[subtype] > 0)
		viopathStatus[remoteLp].users[subtype]--;

	spin_unlock_irqrestore(&statuslock, flags);

	/*
	 * Bug fix: used_wait_atomic was previously left uninitialised,
	 * so viopath_donealloc() could read stack garbage and spin on
	 * the (also uninitialised) wait_atomic pointer instead of
	 * posting the semaphore.  Explicitly select the semaphore path.
	 */
	doneAllocParms.used_wait_atomic = 0;
	doneAllocParms.sem = &Semaphore;
	mf_deallocateLpEvents(remoteLp, HvLpEvent_Type_VirtualIo,
			      numReq, &viopath_donealloc, &doneAllocParms);
	down(&Semaphore);

	spin_lock_irqsave(&statuslock, flags);
	for (i = 0, numOpen = 0; i < VIO_MAX_SUBTYPES; i++)
		numOpen += viopathStatus[remoteLp].users[i];

	if ((viopathStatus[remoteLp].isOpen) && (numOpen == 0)) {
		printk(KERN_INFO_VIO "Closing connection to partition %d\n",
		       remoteLp);

		HvCallEvent_closeLpEventPath(remoteLp,
				HvLpEvent_Type_VirtualIo);
		viopathStatus[remoteLp].isOpen = 0;
		viopathStatus[remoteLp].isActive = 0;

		for (i = 0; i < VIO_MAX_SUBTYPES; i++)
			atomic_set(&event_buffer_available[i], 0);
		event_buffer_initialised = 0;
	}
	spin_unlock_irqrestore(&statuslock, flags);
	return 0;
}
EXPORT_SYMBOL(viopath_close);
/*
 * Claim the single 256-byte event buffer for a subtype.  Returns a
 * pointer to the buffer, or NULL if the subtype is out of range or
 * the buffer is already in use.
 */
void *vio_get_event_buffer(int subtype)
{
	int idx = subtype >> VIOMAJOR_SUBTYPE_SHIFT;

	if ((idx < 0) || (idx >= VIO_MAX_SUBTYPES))
		return NULL;

	/* atomic_dec_if_positive() returns the new count; 0 means we
	 * just took the one available slot. */
	if (atomic_dec_if_positive(&event_buffer_available[idx]) != 0)
		return NULL;
	return &event_buffer[idx * 256];
}
EXPORT_SYMBOL(vio_get_event_buffer);
/*
 * Release the event buffer claimed by vio_get_event_buffer().  Bad
 * subtypes and double-frees are rejected; a mismatched pointer is
 * logged but the slot is still released (matching original behavior).
 */
void vio_free_event_buffer(int subtype, void *buffer)
{
	int idx = subtype >> VIOMAJOR_SUBTYPE_SHIFT;

	if ((idx < 0) || (idx >= VIO_MAX_SUBTYPES)) {
		printk(KERN_WARNING_VIO
		       "unexpected subtype %d freeing event buffer\n",
		       idx);
		return;
	}

	if (atomic_read(&event_buffer_available[idx]) != 0) {
		printk(KERN_WARNING_VIO
		       "freeing unallocated event buffer, subtype %d\n",
		       idx);
		return;
	}

	if (buffer != &event_buffer[idx * 256]) {
		printk(KERN_WARNING_VIO
		       "freeing invalid event buffer, subtype %d\n",
		       idx);
	}

	atomic_set(&event_buffer_available[idx], 1);
}
EXPORT_SYMBOL(vio_free_event_buffer);
static const struct vio_error_entry vio_no_error =
{ 0, 0, "Non-VIO Error" };
static const struct vio_error_entry vio_unknown_error =
{ 0, EIO, "Unknown Error" };
static const struct vio_error_entry vio_default_errors[] = {
{0x0001, EIO, "No Connection"},
{0x0002, EIO, "No Receiver"},
{0x0003, EIO, "No Buffer Available"},
{0x0004, EBADRQC, "Invalid Message Type"},
{0x0000, 0, NULL},
};
/*
 * Map a VIO return code to its error-table entry.  A zero rc means
 * "no error"; otherwise the caller's local table (if any) is searched
 * first, then the default table, falling back to a generic entry.
 */
const struct vio_error_entry *vio_lookup_rc(
		const struct vio_error_entry *local_table, u16 rc)
{
	const struct vio_error_entry *entry;

	if (rc == 0)
		return &vio_no_error;

	if (local_table != NULL) {
		for (entry = local_table; entry->rc != 0; ++entry)
			if (entry->rc == rc)
				return entry;
	}
	for (entry = vio_default_errors; entry->rc != 0; ++entry)
		if (entry->rc == rc)
			return entry;

	return &vio_unknown_error;
}
EXPORT_SYMBOL(vio_lookup_rc);
......@@ -23,25 +23,16 @@
// drive the hypervisor from the OS.
//
//===========================================================================
#ifndef _HVCALL_H
#define _HVCALL_H
//-------------------------------------------------------------------
// Standard Includes
//-------------------------------------------------------------------
#ifndef _HVCALLSC_H
#include "HvCallSc.h"
#endif
#ifndef _HVTYPES_H
#include <asm/iSeries/HvCallSc.h>
#include <asm/iSeries/HvTypes.h>
#endif
#include <asm/paca.h>
//-------------------------------------------------------------------
// Constants
//-------------------------------------------------------------------
#ifndef _HVCALL_H
#define _HVCALL_H
/*
enum HvCall_ReturnCode
{
......@@ -211,5 +202,4 @@ static inline void HvCall_setDebugBus(unsigned long val)
HvCall1(HvCallBaseSetDebugBus, val);
}
#endif // _HVCALL_H
#endif /* _HVCALL_H */
......@@ -23,23 +23,18 @@
// drive the hypervisor from the OS.
//
//=====================================================================================
#ifndef _HVCALLCFG_H
#define _HVCALLCFG_H
//-------------------------------------------------------------------
// Standard Includes
//-------------------------------------------------------------------
#ifndef _HVCALLSC_H
#include "HvCallSc.h"
#endif
#ifndef _HVTYPES_H
#include <asm/iSeries/HvCallSc.h>
#include <asm/iSeries/HvTypes.h>
#endif
//-------------------------------------------------------------------------------------
// Constants
//-------------------------------------------------------------------------------------
#ifndef _HVCALLCFG_H
#define _HVCALLCFG_H
enum HvCallCfg_ReqQual
{
......@@ -215,5 +210,4 @@ static inline HvLpIndex HvCallCfg_getHostingLpIndex(HvLpIndex lp)
}
#endif // _HVCALLCFG_H
#endif /* _HVCALLCFG_H */
......@@ -17,44 +17,27 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
//==================================================================
//
// This file contains the "hypervisor call" interface which is used to
// drive the hypervisor from the OS.
//
//==================================================================
//-------------------------------------------------------------------
// Standard Includes
//-------------------------------------------------------------------
#ifndef _HVCALLSC_H
#include <asm/iSeries/HvCallSc.h>
#endif
/*
* This file contains the "hypervisor call" interface which is used to
* drive the hypervisor from the OS.
*/
#ifndef _HVCALLEVENT_H
#define _HVCALLEVENT_H
#ifndef _HVTYPES_H
/*
* Standard Includes
*/
#include <asm/iSeries/HvCallSc.h>
#include <asm/iSeries/HvTypes.h>
#endif
#include <asm/abs_addr.h>
//-------------------------------------------------------------------
// Other Includes
//-------------------------------------------------------------------
//-------------------------------------------------------------------
// Constants
//-------------------------------------------------------------------
#ifndef _HVCALLEVENT_H
#define _HVCALLEVENT_H
struct HvLpEvent;
typedef u8 HvLpEvent_Type;
typedef u8 HvLpEvent_AckInd;
typedef u8 HvLpEvent_AckType;
struct HvCallEvent_PackedParms
{
struct HvCallEvent_PackedParms {
u8 xAckType:1;
u8 xAckInd:1;
u8 xRsvd:1;
......@@ -68,8 +51,7 @@ struct HvCallEvent_PackedParms
typedef u8 HvLpDma_Direction;
typedef u8 HvLpDma_AddressType;
struct HvCallEvent_PackedDmaParms
{
struct HvCallEvent_PackedDmaParms {
u8 xDirection:1;
u8 xLocalAddrType:1;
u8 xRemoteAddrType:1;
......@@ -101,69 +83,63 @@ typedef u64 HvLpDma_Rc;
#define HvCallEventSetLpEventQueueInterruptProc HvCallEvent + 14
#define HvCallEventRouter15 HvCallEvent + 15
//======================================================================
static inline void HvCallEvent_getOverflowLpEvents(u8 queueIndex)
static inline void HvCallEvent_getOverflowLpEvents(u8 queueIndex)
{
HvCall1(HvCallEventGetOverflowLpEvents,queueIndex);
// getPaca()->adjustHmtForNoOfSpinLocksHeld();
}
//======================================================================
static inline void HvCallEvent_setInterLpQueueIndex(u8 queueIndex)
static inline void HvCallEvent_setInterLpQueueIndex(u8 queueIndex)
{
HvCall1(HvCallEventSetInterLpQueueIndex,queueIndex);
// getPaca()->adjustHmtForNoOfSpinLocksHeld();
}
//======================================================================
static inline void HvCallEvent_setLpEventStack(u8 queueIndex,
char * eventStackAddr,
u32 eventStackSize)
static inline void HvCallEvent_setLpEventStack(u8 queueIndex,
char *eventStackAddr, u32 eventStackSize)
{
u64 abs_addr;
abs_addr = virt_to_absolute( (unsigned long) eventStackAddr );
HvCall3(HvCallEventSetLpEventStack, queueIndex, abs_addr, eventStackSize);
abs_addr = virt_to_absolute((unsigned long)eventStackAddr);
HvCall3(HvCallEventSetLpEventStack, queueIndex, abs_addr,
eventStackSize);
// getPaca()->adjustHmtForNoOfSpinLocksHeld();
}
//======================================================================
static inline void HvCallEvent_setLpEventQueueInterruptProc(u8 queueIndex,
u16 lpLogicalProcIndex)
static inline void HvCallEvent_setLpEventQueueInterruptProc(u8 queueIndex,
u16 lpLogicalProcIndex)
{
HvCall2(HvCallEventSetLpEventQueueInterruptProc,queueIndex,lpLogicalProcIndex);
HvCall2(HvCallEventSetLpEventQueueInterruptProc, queueIndex,
lpLogicalProcIndex);
// getPaca()->adjustHmtForNoOfSpinLocksHeld();
}
//=====================================================================
static inline HvLpEvent_Rc HvCallEvent_signalLpEvent(struct HvLpEvent* event)
static inline HvLpEvent_Rc HvCallEvent_signalLpEvent(struct HvLpEvent *event)
{
u64 abs_addr;
HvLpEvent_Rc retVal;
#ifdef DEBUG_SENDEVENT
printk("HvCallEvent_signalLpEvent: *event = %016lx\n ", (unsigned long)event);
printk("HvCallEvent_signalLpEvent: *event = %016lx\n ",
(unsigned long)event);
#endif
abs_addr = virt_to_absolute( (unsigned long) event );
abs_addr = virt_to_absolute((unsigned long)event);
retVal = (HvLpEvent_Rc)HvCall1(HvCallEventSignalLpEvent, abs_addr);
// getPaca()->adjustHmtForNoOfSpinLocksHeld();
return retVal;
}
//=====================================================================
static inline HvLpEvent_Rc HvCallEvent_signalLpEventFast(HvLpIndex targetLp,
HvLpEvent_Type type,
u16 subtype,
HvLpEvent_AckInd ackInd,
HvLpEvent_AckType ackType,
HvLpInstanceId sourceInstanceId,
HvLpInstanceId targetInstanceId,
u64 correlationToken,
u64 eventData1,
u64 eventData2,
u64 eventData3,
u64 eventData4,
u64 eventData5)
static inline HvLpEvent_Rc HvCallEvent_signalLpEventFast(HvLpIndex targetLp,
HvLpEvent_Type type, u16 subtype, HvLpEvent_AckInd ackInd,
HvLpEvent_AckType ackType, HvLpInstanceId sourceInstanceId,
HvLpInstanceId targetInstanceId, u64 correlationToken,
u64 eventData1, u64 eventData2, u64 eventData3,
u64 eventData4, u64 eventData5)
{
HvLpEvent_Rc retVal;
// Pack the misc bits into a single Dword to pass to PLIC
union
{
union {
struct HvCallEvent_PackedParms parms;
u64 dword;
} packed;
......@@ -177,88 +153,84 @@ static inline HvLpEvent_Rc HvCallEvent_signalLpEventFast(HvLpIndex targetLp,
packed.parms.xTargetInstId = targetInstanceId;
retVal = (HvLpEvent_Rc)HvCall7(HvCallEventSignalLpEventParms,
packed.dword,
correlationToken,
eventData1,eventData2,
eventData3,eventData4,
eventData5);
packed.dword, correlationToken, eventData1,eventData2,
eventData3,eventData4, eventData5);
// getPaca()->adjustHmtForNoOfSpinLocksHeld();
return retVal;
}
//====================================================================
static inline HvLpEvent_Rc HvCallEvent_ackLpEvent(struct HvLpEvent* event)
static inline HvLpEvent_Rc HvCallEvent_ackLpEvent(struct HvLpEvent *event)
{
u64 abs_addr;
HvLpEvent_Rc retVal;
abs_addr = virt_to_absolute( (unsigned long) event );
abs_addr = virt_to_absolute((unsigned long)event);
retVal = (HvLpEvent_Rc)HvCall1(HvCallEventAckLpEvent, abs_addr);
// getPaca()->adjustHmtForNoOfSpinLocksHeld();
return retVal;
}
//====================================================================
static inline HvLpEvent_Rc HvCallEvent_cancelLpEvent(struct HvLpEvent* event)
static inline HvLpEvent_Rc HvCallEvent_cancelLpEvent(struct HvLpEvent *event)
{
u64 abs_addr;
HvLpEvent_Rc retVal;
abs_addr = virt_to_absolute( (unsigned long) event );
abs_addr = virt_to_absolute((unsigned long)event);
retVal = (HvLpEvent_Rc)HvCall1(HvCallEventCancelLpEvent, abs_addr);
// getPaca()->adjustHmtForNoOfSpinLocksHeld();
return retVal;
}
//===================================================================
static inline HvLpInstanceId HvCallEvent_getSourceLpInstanceId(HvLpIndex targetLp, HvLpEvent_Type type)
static inline HvLpInstanceId HvCallEvent_getSourceLpInstanceId(
HvLpIndex targetLp, HvLpEvent_Type type)
{
HvLpInstanceId retVal;
retVal = HvCall2(HvCallEventGetSourceLpInstanceId,targetLp,type);
retVal = HvCall2(HvCallEventGetSourceLpInstanceId, targetLp, type);
// getPaca()->adjustHmtForNoOfSpinLocksHeld();
return retVal;
}
//===================================================================
static inline HvLpInstanceId HvCallEvent_getTargetLpInstanceId(HvLpIndex targetLp, HvLpEvent_Type type)
static inline HvLpInstanceId HvCallEvent_getTargetLpInstanceId(
HvLpIndex targetLp, HvLpEvent_Type type)
{
HvLpInstanceId retVal;
retVal = HvCall2(HvCallEventGetTargetLpInstanceId,targetLp,type);
retVal = HvCall2(HvCallEventGetTargetLpInstanceId, targetLp, type);
// getPaca()->adjustHmtForNoOfSpinLocksHeld();
return retVal;
}
//===================================================================
static inline void HvCallEvent_openLpEventPath(HvLpIndex targetLp,
HvLpEvent_Type type)
static inline void HvCallEvent_openLpEventPath(HvLpIndex targetLp,
HvLpEvent_Type type)
{
HvCall2(HvCallEventOpenLpEventPath,targetLp,type);
HvCall2(HvCallEventOpenLpEventPath, targetLp, type);
// getPaca()->adjustHmtForNoOfSpinLocksHeld();
}
//===================================================================
static inline void HvCallEvent_closeLpEventPath(HvLpIndex targetLp,
HvLpEvent_Type type)
static inline void HvCallEvent_closeLpEventPath(HvLpIndex targetLp,
HvLpEvent_Type type)
{
HvCall2(HvCallEventCloseLpEventPath,targetLp,type);
HvCall2(HvCallEventCloseLpEventPath, targetLp, type);
// getPaca()->adjustHmtForNoOfSpinLocksHeld();
}
//===================================================================
static inline HvLpDma_Rc HvCallEvent_dmaBufList(HvLpEvent_Type type,
HvLpIndex remoteLp,
HvLpDma_Direction direction,
HvLpInstanceId localInstanceId,
HvLpInstanceId remoteInstanceId,
HvLpDma_AddressType localAddressType,
HvLpDma_AddressType remoteAddressType,
// Do these need to be converted to
// absolute addresses?
u64 localBufList,
u64 remoteBufList,
u32 transferLength)
static inline HvLpDma_Rc HvCallEvent_dmaBufList(HvLpEvent_Type type,
HvLpIndex remoteLp, HvLpDma_Direction direction,
HvLpInstanceId localInstanceId,
HvLpInstanceId remoteInstanceId,
HvLpDma_AddressType localAddressType,
HvLpDma_AddressType remoteAddressType,
/* Do these need to be converted to absolute addresses? */
u64 localBufList, u64 remoteBufList, u32 transferLength)
{
HvLpDma_Rc retVal;
HvLpDma_Rc retVal;
// Pack the misc bits into a single Dword to pass to PLIC
union
{
union {
struct HvCallEvent_PackedDmaParms parms;
u64 dword;
} packed;
packed.parms.xDirection = direction;
packed.parms.xLocalAddrType = localAddressType;
packed.parms.xRemoteAddrType = remoteAddressType;
......@@ -270,32 +242,27 @@ static inline HvLpDma_Rc HvCallEvent_dmaBufList(HvLpEvent_Type type,
packed.parms.xRemoteInstId = remoteInstanceId;
retVal = (HvLpDma_Rc)HvCall4(HvCallEventDmaBufList,
packed.dword,
localBufList,
remoteBufList,
transferLength);
packed.dword, localBufList, remoteBufList,
transferLength);
// getPaca()->adjustHmtForNoOfSpinLocksHeld();
return retVal;
}
//=================================================================
static inline HvLpDma_Rc HvCallEvent_dmaSingle(HvLpEvent_Type type,
HvLpIndex remoteLp,
HvLpDma_Direction direction,
HvLpInstanceId localInstanceId,
HvLpInstanceId remoteInstanceId,
HvLpDma_AddressType localAddressType,
HvLpDma_AddressType remoteAddressType,
u64 localAddrOrTce,
u64 remoteAddrOrTce,
u32 transferLength)
static inline HvLpDma_Rc HvCallEvent_dmaSingle(HvLpEvent_Type type,
HvLpIndex remoteLp, HvLpDma_Direction direction,
HvLpInstanceId localInstanceId,
HvLpInstanceId remoteInstanceId,
HvLpDma_AddressType localAddressType,
HvLpDma_AddressType remoteAddressType,
u64 localAddrOrTce, u64 remoteAddrOrTce, u32 transferLength)
{
HvLpDma_Rc retVal;
HvLpDma_Rc retVal;
// Pack the misc bits into a single Dword to pass to PLIC
union
{
union {
struct HvCallEvent_PackedDmaParms parms;
u64 dword;
} packed;
packed.parms.xDirection = direction;
packed.parms.xLocalAddrType = localAddressType;
packed.parms.xRemoteAddrType = remoteAddressType;
......@@ -307,29 +274,24 @@ static inline HvLpDma_Rc HvCallEvent_dmaSingle(HvLpEvent_Type type,
packed.parms.xRemoteInstId = remoteInstanceId;
retVal = (HvLpDma_Rc)HvCall4(HvCallEventDmaSingle,
packed.dword,
localAddrOrTce,
remoteAddrOrTce,
transferLength);
packed.dword, localAddrOrTce, remoteAddrOrTce,
transferLength);
// getPaca()->adjustHmtForNoOfSpinLocksHeld();
return retVal;
}
//=================================================================
static inline HvLpDma_Rc HvCallEvent_dmaToSp(void* local, u32 remote, u32 length, HvLpDma_Direction dir)
static inline HvLpDma_Rc HvCallEvent_dmaToSp(void* local, u32 remote,
u32 length, HvLpDma_Direction dir)
{
u64 abs_addr;
HvLpDma_Rc retVal;
abs_addr = virt_to_absolute( (unsigned long) local );
retVal = (HvLpDma_Rc)HvCall4(HvCallEventDmaToSp,
abs_addr,
remote,
length,
dir);
abs_addr = virt_to_absolute((unsigned long)local);
retVal = (HvLpDma_Rc)HvCall4(HvCallEventDmaToSp, abs_addr, remote,
length, dir);
// getPaca()->adjustHmtForNoOfSpinLocksHeld();
return retVal;
}
//================================================================
#endif // _HVCALLEVENT_H
#endif /* _HVCALLEVENT_H */
......@@ -16,6 +16,8 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef _HVCALLHPT_H
#define _HVCALLHPT_H
//============================================================================
//
......@@ -24,30 +26,13 @@
//
//============================================================================
//-------------------------------------------------------------------
// Standard Includes
//-------------------------------------------------------------------
#ifndef _HVCALLSC_H
#include "HvCallSc.h"
#endif
#ifndef _HVTYPES_H
#include <asm/iSeries/HvCallSc.h>
#include <asm/iSeries/HvTypes.h>
#endif
//-------------------------------------------------------------------
// Other Includes
//-------------------------------------------------------------------
#ifndef _PPC_MMU_H
#include <asm/mmu.h>
#endif
//-----------------------------------------------------------------------------
// Constants
//-----------------------------------------------------------------------------
#ifndef _HVCALLHPT_H
#define _HVCALLHPT_H
#define HvCallHptGetHptAddress HvCallHpt + 0
#define HvCallHptGetHptPages HvCallHpt + 1
......@@ -139,5 +124,4 @@ static inline void HvCallHpt_addValidate( u32 hpteIndex,
//=============================================================================
#endif // _HVCALLHPT_H
#endif /* _HVCALLHPT_H */
......@@ -31,6 +31,8 @@
// drive the hypervisor from SLIC.
//
//============================================================================
#ifndef _HVCALLPCI_H
#define _HVCALLPCI_H
//-------------------------------------------------------------------
// Forward declarations
......@@ -39,24 +41,12 @@
//-------------------------------------------------------------------
// Standard Includes
//-------------------------------------------------------------------
#ifndef _HVCALLSC_H
#include "HvCallSc.h"
#endif
#ifndef _HVTYPES_H
#include <asm/iSeries/HvCallSc.h>
#include <asm/iSeries/HvTypes.h>
#endif
//-------------------------------------------------------------------
// Other Includes
//-------------------------------------------------------------------
//-----------------------------------------------------------------------------
// Constants
//-----------------------------------------------------------------------------
#ifndef _HVCALLPCI_H
#define _HVCALLPCI_H
struct HvCallPci_DsaAddr { // make sure this struct size is 64-bits total
u16 busNumber;
......@@ -694,4 +684,4 @@ static inline int HvCallPci_getBusAdapterVpd(u16 busNumParm, u64 destParm, u16 s
return xRetSize;
}
//============================================================================
#endif // _HVCALLPCI_H
#endif /* _HVCALLPCI_H */
......@@ -16,14 +16,11 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef _HVTYPES_H
#include <asm/iSeries/HvTypes.h>
#endif
#ifndef _HVCALLSC_H
#define _HVCALLSC_H
#include <asm/iSeries/HvTypes.h>
#define HvCallBase 0x8000000000000000
#define HvCallCc 0x8001000000000000
#define HvCallCfg 0x8002000000000000
......
......@@ -16,6 +16,8 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef _HVCALLSM_H
#define _HVCALLSM_H
//============================================================================
//
......@@ -27,19 +29,12 @@
//-------------------------------------------------------------------
// Standard Includes
//-------------------------------------------------------------------
#ifndef _HVCALLSC_H
#include "HvCallSc.h"
#endif
#ifndef _HVTYPES_H
#include <asm/iSeries/HvCallSc.h>
#include <asm/iSeries/HvTypes.h>
#endif
//-----------------------------------------------------------------------------
// Constants
//-----------------------------------------------------------------------------
#ifndef _HVCALLSM_H
#define _HVCALLSM_H
#define HvCallSmGet64BitsOfAccessMap HvCallSm + 11
......@@ -54,5 +49,4 @@ static inline u64 HvCallSm_get64BitsOfAccessMap(
return retval;
}
//============================================================================
#endif // _HVCALLSM_H
#endif /* _HVCALLSM_H */
......@@ -8,6 +8,8 @@
// drive the hypervisor from SLIC.
//
//============================================================================
#ifndef _HVCALLXM_H
#define _HVCALLXM_H
//-------------------------------------------------------------------
// Forward declarations
......@@ -16,24 +18,12 @@
//-------------------------------------------------------------------
// Standard Includes
//-------------------------------------------------------------------
#ifndef _HVCALLSC_H
#include "HvCallSc.h"
#endif
#ifndef _HVTYPES_H
#include <asm/iSeries/HvCallSc.h>
#include <asm/iSeries/HvTypes.h>
#endif
//-------------------------------------------------------------------
// Other Includes
//-------------------------------------------------------------------
//-----------------------------------------------------------------------------
// Constants
//-----------------------------------------------------------------------------
#ifndef _HVCALLXM_H
#define _HVCALLXM_H
#define HvCallXmGetTceTableParms HvCallXm + 0
#define HvCallXmTestBus HvCallXm + 1
......@@ -102,5 +92,4 @@ static inline u64 HvCallXm_loadTod(void)
}
//=====================================================================================
#endif // _HVCALLXM_H
#endif /* _HVCALLXM_H */
......@@ -16,6 +16,8 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef _HVLPCONFIG_H
#define _HVLPCONFIG_H
//===========================================================================
//
......@@ -24,24 +26,10 @@
//
//===========================================================================
#ifndef _HVCALLCFG_H
#include "HvCallCfg.h"
#endif
#ifndef _HVTYPES_H
#include <asm/iSeries/HvCallCfg.h>
#include <asm/iSeries/HvTypes.h>
#endif
#ifndef _ITLPNACA_H
#include <asm/iSeries/ItLpNaca.h>
#endif
#ifndef _LPARDATA_H
#include <asm/iSeries/LparData.h>
#endif
#ifndef _HVLPCONFIG_H
#define _HVLPCONFIG_H
//-------------------------------------------------------------------
// Constants
......@@ -289,4 +277,4 @@ static inline HvLpIndex HvLpConfig_getHostingLpIndex(HvLpIndex lp)
}
//================================================================
#endif // _HVLPCONFIG_H
#endif /* _HVLPCONFIG_H */
......@@ -28,10 +28,7 @@
#include <asm/types.h>
#include <asm/ptrace.h>
#include <asm/iSeries/HvTypes.h>
#ifndef _HVCALLEVENT_H
#include <asm/iSeries/HvCallEvent.h>
#endif
//=====================================================================
//
......
......@@ -16,6 +16,8 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef _HVRELEASEDATA_H
#define _HVRELEASEDATA_H
//=============================================================================
//
......@@ -23,15 +25,7 @@
// release so that it can be changed in the future (ie, the virtual
// address of the OS's NACA).
//
//-----------------------------------------------------------------------------
// Standard Includes
//-----------------------------------------------------------------------------
#ifndef _PPC64_TYPES_H
#include <asm/types.h>
#endif
#ifndef _HVRELEASEDATA_H
#define _HVRELEASEDATA_H
#include <asm/types.h>
//=============================================================================
//
......@@ -67,4 +61,4 @@ struct HvReleaseData
char xRsvd3[20]; // Reserved x2C-x3F
};
#endif // _HVRELEASEDATA_H
#endif /* _HVRELEASEDATA_H */
......@@ -16,6 +16,8 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef _HVTYPES_H
#define _HVTYPES_H
//===========================================================================
// Header File Id
......@@ -29,13 +31,7 @@
//
//===========================================================================
#ifndef _PPC_TYPES_H
#include <asm/types.h>
#endif
#ifndef _HVTYPES_H
#define _HVTYPES_H
#include <asm/types.h>
//-------------------------------------------------------------------
// Typedefs
......@@ -124,4 +120,4 @@ struct HvLpBufferList {
u64 len;
};
#endif // _HVTYPES_H
#endif /* _HVTYPES_H */
......@@ -16,18 +16,15 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef _IOHRIPROCESSORVPD_H
#define _IOHRIPROCESSORVPD_H
//===================================================================
//
// This struct maps Processor Vpd that is DMAd to SLIC by CSP
//
#ifndef _TYPES_H
#include <asm/types.h>
#endif
#ifndef _IOHRIPROCESSORVPD_H
#define _IOHRIPROCESSORVPD_H
struct IoHriProcessorVpd
{
......@@ -87,4 +84,5 @@ struct IoHriProcessorVpd
char xProcSrc[72]; // CSP format SRC xB8-xFF
};
#endif // _IOHRIPROCESSORVPD_H
#endif /* _IOHRIPROCESSORVPD_H */
......@@ -16,6 +16,8 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef _ITEXTVPDPANEL_H
#define _ITEXTVPDPANEL_H
/*
*
......@@ -31,12 +33,8 @@
* Standard Includes
*-------------------------------------------------------------------
*/
#ifndef _PPC_TYPES_H
#include <asm/types.h>
#endif
#include <asm/types.h>
#ifndef _ITEXTVPDPANEL_H
#define _ITEXTVPDPANEL_H
struct ItExtVpdPanel
{
// Definition of the Extended Vpd On Panel Data Area
......
......@@ -16,6 +16,8 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef _ITIPLPARMSREAL_H
#define _ITIPLPARMSREAL_H
//==============================================================================
//
......@@ -31,12 +33,7 @@
//-------------------------------------------------------------------
// Standard Includes
//-------------------------------------------------------------------
#ifndef _PPC_TYPES_H
#include <asm/types.h>
#endif
#ifndef _ITIPLPARMSREAL_H
#define _ITIPLPARMSREAL_H
#include <asm/types.h>
struct ItIplParmsReal
{
......@@ -75,4 +72,5 @@ struct ItIplParmsReal
u64 xRsvd12; // Reserved x30-x37
u64 xRsvd13; // Reserved x38-x3F
};
#endif // _ITIPLPARMSREAL_H
#endif /* _ITIPLPARMSREAL_H */
......@@ -16,6 +16,8 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef _ITLPNACA_H
#define _ITLPNACA_H
//=============================================================================
//
......@@ -24,10 +26,6 @@
//
//=============================================================================
#ifndef _ITLPNACA_H
#define _ITLPNACA_H
struct ItLpNaca
{
//=============================================================================
......@@ -87,4 +85,4 @@ struct ItLpNaca
//=============================================================================
#endif // _ITLPNACA_H
#endif /* _ITLPNACA_H */
......@@ -16,6 +16,8 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef _ITLPPACA_H
#define _ITLPPACA_H
//=============================================================================
//
......@@ -24,13 +26,7 @@
//
//
//----------------------------------------------------------------------------
#ifndef _PPC_TYPES_H
#include <asm/types.h>
#endif
#ifndef _ITLPPACA_H
#define _ITLPPACA_H
struct ItLpPaca
{
......@@ -134,4 +130,5 @@ struct ItLpPaca
};
#endif // _ITLPPACA_H
#endif /* _ITLPPACA_H */
......@@ -16,6 +16,8 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef _ITLPQUEUE_H
#define _ITLPQUEUE_H
//=============================================================================
//
......@@ -24,18 +26,11 @@
// events to an LP.
//
#ifndef _PPC_TYPES_H
#include <asm/types.h>
#endif
#include <asm/ptrace.h>
struct HvLpEvent;
#ifndef _ITLPQUEUE_H
#define _ITLPQUEUE_H
#define ITMaxLpQueues 8
#define NotUsed 0 // Queue will not be used by PLIC
......@@ -94,6 +89,4 @@ static __inline__ void process_iSeries_events( void )
: : : "r0", "r3" );
}
//=============================================================================
#endif // _ITLPQUEUE_H
#endif /* _ITLPQUEUE_H */
......@@ -16,6 +16,8 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef _ITLPREGSAVE_H
#define _ITLPREGSAVE_H
//=====================================================================================
//
......@@ -24,9 +26,6 @@
//
//
#ifndef _ITLPREGSAVE_H
#define _ITLPREGSAVE_H
struct ItLpRegSave
{
u32 xDesc; // Eye catcher "LpRS" ebcdic 000-003
......@@ -84,4 +83,5 @@ struct ItLpRegSave
u8 xRsvd3[176]; // Reserved 350-3FF
};
#endif // _ITLPREGSAVE_H
#endif /* _ITLPREGSAVE_H */
......@@ -16,6 +16,8 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef _ITVPDAREAS_H
#define _ITVPDAREAS_H
//=====================================================================================
//
......@@ -23,13 +25,7 @@
// the OS from PLIC (most of which start from the SP).
//
#ifndef _PPC_TYPES_H
#include <asm/types.h>
#endif
#ifndef _ITVPDAREAS_H
#define _ITVPDAREAS_H
#include <asm/types.h>
// VPD Entry index is carved in stone - cannot be changed (easily).
#define ItVpdCecVpd 0
......@@ -97,4 +93,4 @@ struct ItVpdAreas
void * xSlicVpdAdrs[ItVpdMaxEntries];// Array of VPD buffers 130-1EF
};
#endif // _ITVPDAREAS_H
#endif /* _ITVPDAREAS_H */
......@@ -16,14 +16,11 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef _PPC_TYPES_H
#include <asm/types.h>
#endif
#ifndef _LPARMAP_H
#define _LPARMAP_H
#include <asm/types.h>
/* The iSeries hypervisor will set up mapping for one or more
* ESID/VSID pairs (in SLB/segment registers) and will set up
* mappings of one or more ranges of pages to VAs.
......
......@@ -21,9 +21,7 @@
#define _ISERIES_DMA_H
#include <asm/types.h>
#ifndef __LINUX_SPINLOCK_H
#include <linux/spinlock.h>
#endif
// NUM_TCE_LEVELS defines the largest contiguous block
// of dma (tce) space we can get. NUM_TCE_LEVELS = 10
......@@ -94,4 +92,4 @@ extern void create_virtual_bus_tce_table( void );
extern void create_pci_bus_tce_table( unsigned busNumber );
#endif // _ISERIES_DMA_H
#endif /* _ISERIES_DMA_H */
#ifndef _ISERIES_IO_H
#define _ISERIES_IO_H
#include <linux/config.h>
#ifdef CONFIG_PPC_ISERIES
#ifndef _ISERIES_IO_H
#define _ISERIES_IO_H
#include <linux/types.h>
/************************************************************************/
/* File iSeries_io.h created by Allan Trautman on Thu Dec 28 2000. */
......@@ -41,6 +42,5 @@ extern void* iSeries_memset_io(void *dest, char x, size_t n);
extern void* iSeries_memcpy_toio(void *dest, void *source, size_t n);
extern void* iSeries_memcpy_fromio(void *dest, void *source, size_t n);
#endif /* _ISERIES_IO_H */
#endif /* CONFIG_PPC_ISERIES */
#endif /* CONFIG_PPC_ISERIES */
#endif /* _ISERIES_IO_H */
#ifndef __ISERIES_IRQ_H__
#define __ISERIES_IRQ_H__
#ifdef __cplusplus
extern "C" {
#endif
unsigned int iSeries_startup_IRQ(unsigned int);
void iSeries_shutdown_IRQ(unsigned int);
void iSeries_enable_IRQ(unsigned int);
void iSeries_disable_IRQ(unsigned int);
void iSeries_end_IRQ(unsigned int);
void iSeries_init_IRQ(void);
void iSeries_init_irqMap(int);
int iSeries_allocate_IRQ(HvBusNumber, HvSubBusNumber, HvAgentId);
int iSeries_assign_IRQ(int, HvBusNumber, HvSubBusNumber, HvAgentId);
void iSeries_activate_IRQs(void);
......
/* -*- linux-c -*-
* drivers/char/vio.h
*
* iSeries Virtual I/O Message Path header
*
* Authors: Dave Boutcher <boutcher@us.ibm.com>
* Ryan Arnold <ryanarn@us.ibm.com>
* Colin Devilbiss <devilbis@us.ibm.com>
*
* (C) Copyright 2000 IBM Corporation
*
* This header file is used by the iSeries virtual I/O device
* drivers. It defines the interfaces to the common functions
* (implemented in drivers/char/viopath.h) as well as defining
* common functions and structures. Currently (at the time I
* wrote this comment) the iSeries virtual I/O device drivers
* that use this are
* drivers/block/viodasd.c
* drivers/char/viocons.c
* drivers/char/viotape.c
* drivers/cdrom/viocd.c
*
* The iSeries virtual ethernet support (veth.c) uses a whole
* different set of functions.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of the
* License, or (at your option) anyu later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#ifndef _VIO_H
#define _VIO_H
#include <asm/iSeries/HvTypes.h>
#include <asm/iSeries/HvLpEvent.h>
/* iSeries virtual I/O events use the subtype field in
* HvLpEvent to figure out what kind of vio event is coming
* in. We use a table to route these, and this defines
* the maximum number of distinct subtypes
*/
#define VIO_MAX_SUBTYPES 7
/* Each subtype can register a handler to process their events.
* The handler must have this interface.
*/
typedef void (vio_event_handler_t) (struct HvLpEvent * event);
int viopath_open(HvLpIndex remoteLp, int subtype, int numReq);
int viopath_close(HvLpIndex remoteLp, int subtype, int numReq);
int vio_setHandler(int subtype, vio_event_handler_t * beh);
int vio_clearHandler(int subtype);
int viopath_isactive(HvLpIndex lp);
HvLpInstanceId viopath_sourceinst(HvLpIndex lp);
HvLpInstanceId viopath_targetinst(HvLpIndex lp);
void vio_set_hostlp(void);
void *vio_get_event_buffer(int subtype);
void vio_free_event_buffer(int subtype, void *buffer);
extern HvLpIndex viopath_hostLp;
extern HvLpIndex viopath_ourLp;
#define VIO_MESSAGE "iSeries virtual I/O: "
#define KERN_DEBUG_VIO KERN_DEBUG VIO_MESSAGE
#define KERN_INFO_VIO KERN_INFO VIO_MESSAGE
#define KERN_WARNING_VIO KERN_WARNING VIO_MESSAGE
#define VIOCHAR_MAX_DATA 200
#define VIOMAJOR_SUBTYPE_MASK 0xff00
#define VIOMINOR_SUBTYPE_MASK 0x00ff
#define VIOMAJOR_SUBTYPE_SHIFT 8
#define VIOVERSION 0x0101
/*
This is the general structure for VIO errors; each module should have a table
of them, and each table should be terminated by an entry of { 0, 0, NULL }.
Then, to find a specific error message, a module should pass its local table
and the return code.
*/
struct vio_error_entry {
u16 rc;
int errno;
const char *msg;
};
const struct vio_error_entry *vio_lookup_rc(const struct vio_error_entry
*local_table, u16 rc);
enum viosubtypes {
viomajorsubtype_monitor = 0x0100,
viomajorsubtype_blockio = 0x0200,
viomajorsubtype_chario = 0x0300,
viomajorsubtype_config = 0x0400,
viomajorsubtype_cdio = 0x0500,
viomajorsubtype_tape = 0x0600
};
enum vioconfigsubtype {
vioconfigget = 0x0001,
};
enum viorc {
viorc_good = 0x0000,
viorc_noConnection = 0x0001,
viorc_noReceiver = 0x0002,
viorc_noBufferAvailable = 0x0003,
viorc_invalidMessageType = 0x0004,
viorc_invalidRange = 0x0201,
viorc_invalidToken = 0x0202,
viorc_DMAError = 0x0203,
viorc_useError = 0x0204,
viorc_releaseError = 0x0205,
viorc_invalidDisk = 0x0206,
viorc_openRejected = 0x0301
};
#endif /* _VIO_H */
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment