Commit 45c091bb authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc

* git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc: (139 commits)
  [POWERPC] re-enable OProfile for iSeries, using timer interrupt
  [POWERPC] support ibm,extended-*-frequency properties
  [POWERPC] Extra sanity check in EEH code
  [POWERPC] Dont look for class-code in pci children
  [POWERPC] Fix mdelay badness on shared processor partitions
  [POWERPC] disable floating point exceptions for init
  [POWERPC] Unify ppc syscall tables
  [POWERPC] mpic: add support for serial mode interrupts
  [POWERPC] pseries: Print PCI slot location code on failure
  [POWERPC] spufs: one more fix for 64k pages
  [POWERPC] spufs: fail spu_create with invalid flags
  [POWERPC] spufs: clear class2 interrupt status before wakeup
  [POWERPC] spufs: fix Makefile for "make clean"
  [POWERPC] spufs: remove stop_code from struct spu
  [POWERPC] spufs: fix spu irq affinity setting
  [POWERPC] spufs: further abstract priv1 register access
  [POWERPC] spufs: split the Cell BE support into generic and platform dependant parts
  [POWERPC] spufs: dont try to access SPE channel 1 count
  [POWERPC] spufs: use kzalloc in create_spu
  [POWERPC] spufs: fix initial state of wbox file
  ...

Manually resolved conflicts in:
	drivers/net/phy/Makefile
	include/asm-powerpc/spu.h
parents d588fcbe 2191fe3e
...@@ -45,6 +45,10 @@ config GENERIC_CALIBRATE_DELAY ...@@ -45,6 +45,10 @@ config GENERIC_CALIBRATE_DELAY
bool bool
default y default y
config GENERIC_FIND_NEXT_BIT
bool
default y
config PPC config PPC
bool bool
default y default y
...@@ -137,6 +141,15 @@ config PPC_85xx ...@@ -137,6 +141,15 @@ config PPC_85xx
select FSL_SOC select FSL_SOC
select 85xx select 85xx
config PPC_86xx
bool "Freescale 86xx"
select 6xx
select FSL_SOC
select PPC_FPU
select ALTIVEC
help
The Freescale E600 SoCs have 74xx cores.
config 40x config 40x
bool "AMCC 40x" bool "AMCC 40x"
...@@ -336,7 +349,7 @@ endchoice ...@@ -336,7 +349,7 @@ endchoice
config PPC_PSERIES config PPC_PSERIES
depends on PPC_MULTIPLATFORM && PPC64 depends on PPC_MULTIPLATFORM && PPC64
bool " IBM pSeries & new (POWER5-based) iSeries" bool "IBM pSeries & new (POWER5-based) iSeries"
select PPC_I8259 select PPC_I8259
select PPC_RTAS select PPC_RTAS
select RTAS_ERROR_LOGGING select RTAS_ERROR_LOGGING
...@@ -344,7 +357,7 @@ config PPC_PSERIES ...@@ -344,7 +357,7 @@ config PPC_PSERIES
default y default y
config PPC_CHRP config PPC_CHRP
bool " Common Hardware Reference Platform (CHRP) based machines" bool "Common Hardware Reference Platform (CHRP) based machines"
depends on PPC_MULTIPLATFORM && PPC32 depends on PPC_MULTIPLATFORM && PPC32
select PPC_I8259 select PPC_I8259
select PPC_INDIRECT_PCI select PPC_INDIRECT_PCI
...@@ -354,7 +367,7 @@ config PPC_CHRP ...@@ -354,7 +367,7 @@ config PPC_CHRP
default y default y
config PPC_PMAC config PPC_PMAC
bool " Apple PowerMac based machines" bool "Apple PowerMac based machines"
depends on PPC_MULTIPLATFORM depends on PPC_MULTIPLATFORM
select PPC_INDIRECT_PCI if PPC32 select PPC_INDIRECT_PCI if PPC32
select PPC_MPC106 if PPC32 select PPC_MPC106 if PPC32
...@@ -370,7 +383,7 @@ config PPC_PMAC64 ...@@ -370,7 +383,7 @@ config PPC_PMAC64
default y default y
config PPC_PREP config PPC_PREP
bool " PowerPC Reference Platform (PReP) based machines" bool "PowerPC Reference Platform (PReP) based machines"
depends on PPC_MULTIPLATFORM && PPC32 && BROKEN depends on PPC_MULTIPLATFORM && PPC32 && BROKEN
select PPC_I8259 select PPC_I8259
select PPC_INDIRECT_PCI select PPC_INDIRECT_PCI
...@@ -379,7 +392,7 @@ config PPC_PREP ...@@ -379,7 +392,7 @@ config PPC_PREP
config PPC_MAPLE config PPC_MAPLE
depends on PPC_MULTIPLATFORM && PPC64 depends on PPC_MULTIPLATFORM && PPC64
bool " Maple 970FX Evaluation Board" bool "Maple 970FX Evaluation Board"
select U3_DART select U3_DART
select MPIC_BROKEN_U3 select MPIC_BROKEN_U3
select GENERIC_TBSYNC select GENERIC_TBSYNC
...@@ -391,8 +404,18 @@ config PPC_MAPLE ...@@ -391,8 +404,18 @@ config PPC_MAPLE
For more informations, refer to <http://www.970eval.com> For more informations, refer to <http://www.970eval.com>
config PPC_CELL config PPC_CELL
bool " Cell Broadband Processor Architecture" bool
default n
config PPC_CELL_NATIVE
bool
select PPC_CELL
default n
config PPC_IBM_CELL_BLADE
bool " IBM Cell Blade"
depends on PPC_MULTIPLATFORM && PPC64 depends on PPC_MULTIPLATFORM && PPC64
select PPC_CELL_NATIVE
select PPC_RTAS select PPC_RTAS
select MMIO_NVRAM select MMIO_NVRAM
select PPC_UDBG_16550 select PPC_UDBG_16550
...@@ -439,11 +462,6 @@ config MPIC_BROKEN_U3 ...@@ -439,11 +462,6 @@ config MPIC_BROKEN_U3
depends on PPC_MAPLE depends on PPC_MAPLE
default y default y
config CELL_IIC
depends on PPC_CELL
bool
default y
config IBMVIO config IBMVIO
depends on PPC_PSERIES || PPC_ISERIES depends on PPC_PSERIES || PPC_ISERIES
bool bool
...@@ -545,6 +563,7 @@ source arch/powerpc/platforms/embedded6xx/Kconfig ...@@ -545,6 +563,7 @@ source arch/powerpc/platforms/embedded6xx/Kconfig
source arch/powerpc/platforms/4xx/Kconfig source arch/powerpc/platforms/4xx/Kconfig
source arch/powerpc/platforms/83xx/Kconfig source arch/powerpc/platforms/83xx/Kconfig
source arch/powerpc/platforms/85xx/Kconfig source arch/powerpc/platforms/85xx/Kconfig
source arch/powerpc/platforms/86xx/Kconfig
source arch/powerpc/platforms/8xx/Kconfig source arch/powerpc/platforms/8xx/Kconfig
source arch/powerpc/platforms/cell/Kconfig source arch/powerpc/platforms/cell/Kconfig
...@@ -776,6 +795,7 @@ config GENERIC_ISA_DMA ...@@ -776,6 +795,7 @@ config GENERIC_ISA_DMA
config PPC_I8259 config PPC_I8259
bool bool
default y if MPC8641_HPCN
default n default n
config PPC_INDIRECT_PCI config PPC_INDIRECT_PCI
...@@ -798,8 +818,8 @@ config MCA ...@@ -798,8 +818,8 @@ config MCA
bool bool
config PCI config PCI
bool "PCI support" if 40x || CPM2 || PPC_83xx || PPC_85xx || PPC_MPC52xx || (EMBEDDED && PPC_ISERIES) bool "PCI support" if 40x || CPM2 || PPC_83xx || PPC_85xx || PPC_86xx || PPC_MPC52xx || (EMBEDDED && PPC_ISERIES)
default y if !40x && !CPM2 && !8xx && !APUS && !PPC_83xx && !PPC_85xx default y if !40x && !CPM2 && !8xx && !APUS && !PPC_83xx && !PPC_85xx && !PPC_86xx
default PCI_PERMEDIA if !4xx && !CPM2 && !8xx && APUS default PCI_PERMEDIA if !4xx && !CPM2 && !8xx && APUS
default PCI_QSPAN if !4xx && !CPM2 && 8xx default PCI_QSPAN if !4xx && !CPM2 && 8xx
help help
...@@ -827,12 +847,12 @@ config PCI_8260 ...@@ -827,12 +847,12 @@ config PCI_8260
default y default y
config 8260_PCI9 config 8260_PCI9
bool " Enable workaround for MPC826x erratum PCI 9" bool "Enable workaround for MPC826x erratum PCI 9"
depends on PCI_8260 && !ADS8272 depends on PCI_8260 && !ADS8272
default y default y
choice choice
prompt " IDMA channel for PCI 9 workaround" prompt "IDMA channel for PCI 9 workaround"
depends on 8260_PCI9 depends on 8260_PCI9
config 8260_PCI9_IDMA1 config 8260_PCI9_IDMA1
...@@ -849,6 +869,8 @@ config 8260_PCI9_IDMA4 ...@@ -849,6 +869,8 @@ config 8260_PCI9_IDMA4
endchoice endchoice
source "drivers/pci/pcie/Kconfig"
source "drivers/pci/Kconfig" source "drivers/pci/Kconfig"
source "drivers/pcmcia/Kconfig" source "drivers/pcmcia/Kconfig"
......
...@@ -110,13 +110,16 @@ config SERIAL_TEXT_DEBUG ...@@ -110,13 +110,16 @@ config SERIAL_TEXT_DEBUG
depends on 4xx || LOPEC || MV64X60 || PPLUS || PRPMC800 || \ depends on 4xx || LOPEC || MV64X60 || PPLUS || PRPMC800 || \
PPC_GEN550 || PPC_MPC52xx PPC_GEN550 || PPC_MPC52xx
config PPC_EARLY_DEBUG
bool "Early debugging (dangerous)"
choice choice
prompt "Early debugging (dangerous)" prompt "Early debugging console"
bool depends on PPC_EARLY_DEBUG
optional
help help
Enable early debugging. Careful, if you enable debugging for the wrong type of machine your kernel _will not boot_. Use the selected console for early debugging. Careful, if you enable debugging for the wrong type of machine your kernel _will not boot_.
config PPC_EARLY_DEBUG_LPAR config PPC_EARLY_DEBUG_LPAR
bool "LPAR HV Console" bool "LPAR HV Console"
......
...@@ -108,7 +108,6 @@ ifeq ($(CONFIG_6xx),y) ...@@ -108,7 +108,6 @@ ifeq ($(CONFIG_6xx),y)
CFLAGS += -mcpu=powerpc CFLAGS += -mcpu=powerpc
endif endif
cpu-as-$(CONFIG_PPC64BRIDGE) += -Wa,-mppc64bridge
cpu-as-$(CONFIG_4xx) += -Wa,-m405 cpu-as-$(CONFIG_4xx) += -Wa,-m405
cpu-as-$(CONFIG_6xx) += -Wa,-maltivec cpu-as-$(CONFIG_6xx) += -Wa,-maltivec
cpu-as-$(CONFIG_POWER4) += -Wa,-maltivec cpu-as-$(CONFIG_POWER4) += -Wa,-maltivec
......
...@@ -33,6 +33,14 @@ extern char _vmlinux_end[]; ...@@ -33,6 +33,14 @@ extern char _vmlinux_end[];
extern char _initrd_start[]; extern char _initrd_start[];
extern char _initrd_end[]; extern char _initrd_end[];
/* A buffer that may be edited by tools operating on a zImage binary so as to
* edit the command line passed to vmlinux (by setting /chosen/bootargs).
* The buffer is put in it's own section so that tools may locate it easier.
*/
static char builtin_cmdline[512]
__attribute__((section("__builtin_cmdline")));
struct addr_range { struct addr_range {
unsigned long addr; unsigned long addr;
unsigned long size; unsigned long size;
...@@ -204,6 +212,23 @@ static int is_elf32(void *hdr) ...@@ -204,6 +212,23 @@ static int is_elf32(void *hdr)
return 1; return 1;
} }
void export_cmdline(void* chosen_handle)
{
int len;
char cmdline[2] = { 0, 0 };
if (builtin_cmdline[0] == 0)
return;
len = getprop(chosen_handle, "bootargs", cmdline, sizeof(cmdline));
if (len > 0 && cmdline[0] != 0)
return;
setprop(chosen_handle, "bootargs", builtin_cmdline,
strlen(builtin_cmdline) + 1);
}
void start(unsigned long a1, unsigned long a2, void *promptr, void *sp) void start(unsigned long a1, unsigned long a2, void *promptr, void *sp)
{ {
int len; int len;
...@@ -289,6 +314,8 @@ void start(unsigned long a1, unsigned long a2, void *promptr, void *sp) ...@@ -289,6 +314,8 @@ void start(unsigned long a1, unsigned long a2, void *promptr, void *sp)
memmove((void *)vmlinux.addr,(void *)vmlinuz.addr,vmlinuz.size); memmove((void *)vmlinux.addr,(void *)vmlinuz.addr,vmlinuz.size);
} }
export_cmdline(chosen_handle);
/* Skip over the ELF header */ /* Skip over the ELF header */
#ifdef DEBUG #ifdef DEBUG
printf("... skipping 0x%lx bytes of ELF header\n\r", printf("... skipping 0x%lx bytes of ELF header\n\r",
......
...@@ -31,4 +31,11 @@ static inline int getprop(void *phandle, const char *name, ...@@ -31,4 +31,11 @@ static inline int getprop(void *phandle, const char *name,
return call_prom("getprop", 4, 1, phandle, name, buf, buflen); return call_prom("getprop", 4, 1, phandle, name, buf, buflen);
} }
static inline int setprop(void *phandle, const char *name,
void *buf, int buflen)
{
return call_prom("setprop", 4, 1, phandle, name, buf, buflen);
}
#endif /* _PPC_BOOT_PROM_H_ */ #endif /* _PPC_BOOT_PROM_H_ */
# #
# Automatically generated make config: don't edit # Automatically generated make config: don't edit
# Linux kernel version: 2.6.16 # Linux kernel version: 2.6.17
# Thu Mar 23 20:48:09 2006 # Mon Jun 19 17:23:03 2006
# #
CONFIG_PPC64=y CONFIG_PPC64=y
CONFIG_64BIT=y CONFIG_64BIT=y
...@@ -11,6 +11,7 @@ CONFIG_GENERIC_HARDIRQS=y ...@@ -11,6 +11,7 @@ CONFIG_GENERIC_HARDIRQS=y
CONFIG_RWSEM_XCHGADD_ALGORITHM=y CONFIG_RWSEM_XCHGADD_ALGORITHM=y
CONFIG_GENERIC_HWEIGHT=y CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_CALIBRATE_DELAY=y CONFIG_GENERIC_CALIBRATE_DELAY=y
CONFIG_GENERIC_FIND_NEXT_BIT=y
CONFIG_PPC=y CONFIG_PPC=y
CONFIG_EARLY_PRINTK=y CONFIG_EARLY_PRINTK=y
CONFIG_COMPAT=y CONFIG_COMPAT=y
...@@ -55,7 +56,7 @@ CONFIG_SYSCTL=y ...@@ -55,7 +56,7 @@ CONFIG_SYSCTL=y
# CONFIG_AUDIT is not set # CONFIG_AUDIT is not set
CONFIG_IKCONFIG=y CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y CONFIG_IKCONFIG_PROC=y
# CONFIG_CPUSETS is not set CONFIG_CPUSETS=y
# CONFIG_RELAY is not set # CONFIG_RELAY is not set
CONFIG_INITRAMFS_SOURCE="" CONFIG_INITRAMFS_SOURCE=""
CONFIG_CC_OPTIMIZE_FOR_SIZE=y CONFIG_CC_OPTIMIZE_FOR_SIZE=y
...@@ -116,13 +117,15 @@ CONFIG_PPC_MULTIPLATFORM=y ...@@ -116,13 +117,15 @@ CONFIG_PPC_MULTIPLATFORM=y
# CONFIG_PPC_PMAC is not set # CONFIG_PPC_PMAC is not set
# CONFIG_PPC_MAPLE is not set # CONFIG_PPC_MAPLE is not set
CONFIG_PPC_CELL=y CONFIG_PPC_CELL=y
CONFIG_PPC_CELL_NATIVE=y
CONFIG_PPC_IBM_CELL_BLADE=y
CONFIG_PPC_SYSTEMSIM=y
# CONFIG_U3_DART is not set # CONFIG_U3_DART is not set
CONFIG_PPC_RTAS=y CONFIG_PPC_RTAS=y
# CONFIG_RTAS_ERROR_LOGGING is not set # CONFIG_RTAS_ERROR_LOGGING is not set
CONFIG_RTAS_PROC=y CONFIG_RTAS_PROC=y
CONFIG_RTAS_FLASH=y CONFIG_RTAS_FLASH=y
CONFIG_MMIO_NVRAM=y CONFIG_MMIO_NVRAM=y
CONFIG_CELL_IIC=y
# CONFIG_PPC_MPC106 is not set # CONFIG_PPC_MPC106 is not set
# CONFIG_PPC_970_NAP is not set # CONFIG_PPC_970_NAP is not set
# CONFIG_CPU_FREQ is not set # CONFIG_CPU_FREQ is not set
...@@ -132,7 +135,9 @@ CONFIG_CELL_IIC=y ...@@ -132,7 +135,9 @@ CONFIG_CELL_IIC=y
# Cell Broadband Engine options # Cell Broadband Engine options
# #
CONFIG_SPU_FS=m CONFIG_SPU_FS=m
CONFIG_SPU_BASE=y
CONFIG_SPUFS_MMAP=y CONFIG_SPUFS_MMAP=y
CONFIG_CBE_RAS=y
# #
# Kernel options # Kernel options
...@@ -152,20 +157,24 @@ CONFIG_FORCE_MAX_ZONEORDER=13 ...@@ -152,20 +157,24 @@ CONFIG_FORCE_MAX_ZONEORDER=13
CONFIG_KEXEC=y CONFIG_KEXEC=y
# CONFIG_CRASH_DUMP is not set # CONFIG_CRASH_DUMP is not set
CONFIG_IRQ_ALL_CPUS=y CONFIG_IRQ_ALL_CPUS=y
# CONFIG_NUMA is not set CONFIG_NUMA=y
CONFIG_NODES_SHIFT=4
CONFIG_ARCH_SELECT_MEMORY_MODEL=y CONFIG_ARCH_SELECT_MEMORY_MODEL=y
CONFIG_ARCH_FLATMEM_ENABLE=y
CONFIG_ARCH_SPARSEMEM_ENABLE=y CONFIG_ARCH_SPARSEMEM_ENABLE=y
CONFIG_SELECT_MEMORY_MODEL=y CONFIG_SELECT_MEMORY_MODEL=y
# CONFIG_FLATMEM_MANUAL is not set # CONFIG_FLATMEM_MANUAL is not set
# CONFIG_DISCONTIGMEM_MANUAL is not set # CONFIG_DISCONTIGMEM_MANUAL is not set
CONFIG_SPARSEMEM_MANUAL=y CONFIG_SPARSEMEM_MANUAL=y
CONFIG_SPARSEMEM=y CONFIG_SPARSEMEM=y
CONFIG_NEED_MULTIPLE_NODES=y
CONFIG_HAVE_MEMORY_PRESENT=y CONFIG_HAVE_MEMORY_PRESENT=y
# CONFIG_SPARSEMEM_STATIC is not set # CONFIG_SPARSEMEM_STATIC is not set
CONFIG_SPARSEMEM_EXTREME=y CONFIG_SPARSEMEM_EXTREME=y
# CONFIG_MEMORY_HOTPLUG is not set CONFIG_MEMORY_HOTPLUG=y
CONFIG_SPLIT_PTLOCK_CPUS=4 CONFIG_SPLIT_PTLOCK_CPUS=4
CONFIG_MIGRATION=y
CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID=y
CONFIG_ARCH_MEMORY_PROBE=y
# CONFIG_PPC_64K_PAGES is not set # CONFIG_PPC_64K_PAGES is not set
CONFIG_SCHED_SMT=y CONFIG_SCHED_SMT=y
CONFIG_PROC_DEVICETREE=y CONFIG_PROC_DEVICETREE=y
...@@ -182,6 +191,7 @@ CONFIG_GENERIC_ISA_DMA=y ...@@ -182,6 +191,7 @@ CONFIG_GENERIC_ISA_DMA=y
# CONFIG_PPC_INDIRECT_PCI is not set # CONFIG_PPC_INDIRECT_PCI is not set
CONFIG_PCI=y CONFIG_PCI=y
CONFIG_PCI_DOMAINS=y CONFIG_PCI_DOMAINS=y
CONFIG_PCIEPORTBUS=y
# CONFIG_PCI_DEBUG is not set # CONFIG_PCI_DEBUG is not set
# #
...@@ -476,7 +486,7 @@ CONFIG_DM_MULTIPATH=m ...@@ -476,7 +486,7 @@ CONFIG_DM_MULTIPATH=m
# #
CONFIG_NETDEVICES=y CONFIG_NETDEVICES=y
# CONFIG_DUMMY is not set # CONFIG_DUMMY is not set
# CONFIG_BONDING is not set CONFIG_BONDING=y
# CONFIG_EQUALIZER is not set # CONFIG_EQUALIZER is not set
# CONFIG_TUN is not set # CONFIG_TUN is not set
...@@ -624,6 +634,7 @@ CONFIG_SERIAL_NONSTANDARD=y ...@@ -624,6 +634,7 @@ CONFIG_SERIAL_NONSTANDARD=y
# CONFIG_N_HDLC is not set # CONFIG_N_HDLC is not set
# CONFIG_SPECIALIX is not set # CONFIG_SPECIALIX is not set
# CONFIG_SX is not set # CONFIG_SX is not set
# CONFIG_RIO is not set
# CONFIG_STALDRV is not set # CONFIG_STALDRV is not set
# #
...@@ -766,6 +777,7 @@ CONFIG_I2C_ALGOBIT=y ...@@ -766,6 +777,7 @@ CONFIG_I2C_ALGOBIT=y
# Multimedia devices # Multimedia devices
# #
# CONFIG_VIDEO_DEV is not set # CONFIG_VIDEO_DEV is not set
CONFIG_VIDEO_V4L2=y
# #
# Digital Video Broadcasting Devices # Digital Video Broadcasting Devices
...@@ -1054,11 +1066,7 @@ CONFIG_DEBUGGER=y ...@@ -1054,11 +1066,7 @@ CONFIG_DEBUGGER=y
# CONFIG_XMON is not set # CONFIG_XMON is not set
CONFIG_IRQSTACKS=y CONFIG_IRQSTACKS=y
# CONFIG_BOOTX_TEXT is not set # CONFIG_BOOTX_TEXT is not set
# CONFIG_PPC_EARLY_DEBUG_LPAR is not set # CONFIG_PPC_EARLY_DEBUG is not set
# CONFIG_PPC_EARLY_DEBUG_G5 is not set
# CONFIG_PPC_EARLY_DEBUG_RTAS is not set
# CONFIG_PPC_EARLY_DEBUG_MAPLE is not set
# CONFIG_PPC_EARLY_DEBUG_ISERIES is not set
# #
# Security options # Security options
......
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
# #
# Automatically generated make config: don't edit # Automatically generated make config: don't edit
# Linux kernel version: 2.6.17-rc1 # Linux kernel version: 2.6.17-rc4
# Wed Apr 19 11:48:00 2006 # Sun May 28 07:26:56 2006
# #
CONFIG_PPC64=y CONFIG_PPC64=y
CONFIG_64BIT=y CONFIG_64BIT=y
...@@ -11,6 +11,7 @@ CONFIG_GENERIC_HARDIRQS=y ...@@ -11,6 +11,7 @@ CONFIG_GENERIC_HARDIRQS=y
CONFIG_RWSEM_XCHGADD_ALGORITHM=y CONFIG_RWSEM_XCHGADD_ALGORITHM=y
CONFIG_GENERIC_HWEIGHT=y CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_CALIBRATE_DELAY=y CONFIG_GENERIC_CALIBRATE_DELAY=y
CONFIG_GENERIC_FIND_NEXT_BIT=y
CONFIG_PPC=y CONFIG_PPC=y
CONFIG_EARLY_PRINTK=y CONFIG_EARLY_PRINTK=y
CONFIG_COMPAT=y CONFIG_COMPAT=y
...@@ -126,8 +127,9 @@ CONFIG_RTAS_PROC=y ...@@ -126,8 +127,9 @@ CONFIG_RTAS_PROC=y
CONFIG_RTAS_FLASH=m CONFIG_RTAS_FLASH=m
# CONFIG_MMIO_NVRAM is not set # CONFIG_MMIO_NVRAM is not set
CONFIG_IBMVIO=y CONFIG_IBMVIO=y
# CONFIG_IBMEBUS is not set CONFIG_IBMEBUS=y
# CONFIG_PPC_MPC106 is not set # CONFIG_PPC_MPC106 is not set
# CONFIG_PPC_970_NAP is not set
# CONFIG_CPU_FREQ is not set # CONFIG_CPU_FREQ is not set
# CONFIG_WANT_EARLY_SERIAL is not set # CONFIG_WANT_EARLY_SERIAL is not set
...@@ -143,7 +145,7 @@ CONFIG_PREEMPT_NONE=y ...@@ -143,7 +145,7 @@ CONFIG_PREEMPT_NONE=y
# CONFIG_PREEMPT is not set # CONFIG_PREEMPT is not set
# CONFIG_PREEMPT_BKL is not set # CONFIG_PREEMPT_BKL is not set
CONFIG_BINFMT_ELF=y CONFIG_BINFMT_ELF=y
# CONFIG_BINFMT_MISC is not set CONFIG_BINFMT_MISC=m
CONFIG_FORCE_MAX_ZONEORDER=13 CONFIG_FORCE_MAX_ZONEORDER=13
CONFIG_IOMMU_VMERGE=y CONFIG_IOMMU_VMERGE=y
CONFIG_HOTPLUG_CPU=y CONFIG_HOTPLUG_CPU=y
...@@ -155,6 +157,7 @@ CONFIG_EEH=y ...@@ -155,6 +157,7 @@ CONFIG_EEH=y
CONFIG_SCANLOG=m CONFIG_SCANLOG=m
CONFIG_LPARCFG=y CONFIG_LPARCFG=y
CONFIG_NUMA=y CONFIG_NUMA=y
CONFIG_NODES_SHIFT=4
CONFIG_ARCH_SELECT_MEMORY_MODEL=y CONFIG_ARCH_SELECT_MEMORY_MODEL=y
CONFIG_ARCH_SPARSEMEM_ENABLE=y CONFIG_ARCH_SPARSEMEM_ENABLE=y
CONFIG_ARCH_SPARSEMEM_DEFAULT=y CONFIG_ARCH_SPARSEMEM_DEFAULT=y
...@@ -467,7 +470,7 @@ CONFIG_SCSI_CONSTANTS=y ...@@ -467,7 +470,7 @@ CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_SPI_ATTRS=y CONFIG_SCSI_SPI_ATTRS=y
CONFIG_SCSI_FC_ATTRS=y CONFIG_SCSI_FC_ATTRS=y
CONFIG_SCSI_ISCSI_ATTRS=m CONFIG_SCSI_ISCSI_ATTRS=m
# CONFIG_SCSI_SAS_ATTRS is not set CONFIG_SCSI_SAS_ATTRS=m
# #
# SCSI low-level drivers # SCSI low-level drivers
...@@ -499,13 +502,18 @@ CONFIG_SCSI_SYM53C8XX_2=y ...@@ -499,13 +502,18 @@ CONFIG_SCSI_SYM53C8XX_2=y
CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=0 CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=0
CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16 CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16
CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64 CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64
# CONFIG_SCSI_SYM53C8XX_IOMAPPED is not set CONFIG_SCSI_SYM53C8XX_MMIO=y
CONFIG_SCSI_IPR=y CONFIG_SCSI_IPR=y
CONFIG_SCSI_IPR_TRACE=y CONFIG_SCSI_IPR_TRACE=y
CONFIG_SCSI_IPR_DUMP=y CONFIG_SCSI_IPR_DUMP=y
# CONFIG_SCSI_QLOGIC_FC is not set
# CONFIG_SCSI_QLOGIC_1280 is not set # CONFIG_SCSI_QLOGIC_1280 is not set
# CONFIG_SCSI_QLA_FC is not set CONFIG_SCSI_QLA_FC=m
CONFIG_SCSI_QLA2XXX_EMBEDDED_FIRMWARE=y
CONFIG_SCSI_QLA21XX=m
CONFIG_SCSI_QLA22XX=m
CONFIG_SCSI_QLA2300=m
CONFIG_SCSI_QLA2322=m
CONFIG_SCSI_QLA24XX=m
CONFIG_SCSI_LPFC=m CONFIG_SCSI_LPFC=m
# CONFIG_SCSI_DC395x is not set # CONFIG_SCSI_DC395x is not set
# CONFIG_SCSI_DC390T is not set # CONFIG_SCSI_DC390T is not set
...@@ -521,7 +529,7 @@ CONFIG_MD_RAID0=y ...@@ -521,7 +529,7 @@ CONFIG_MD_RAID0=y
CONFIG_MD_RAID1=y CONFIG_MD_RAID1=y
CONFIG_MD_RAID10=m CONFIG_MD_RAID10=m
CONFIG_MD_RAID5=y CONFIG_MD_RAID5=y
# CONFIG_MD_RAID5_RESHAPE is not set CONFIG_MD_RAID5_RESHAPE=y
CONFIG_MD_RAID6=m CONFIG_MD_RAID6=m
CONFIG_MD_MULTIPATH=m CONFIG_MD_MULTIPATH=m
CONFIG_MD_FAULTY=m CONFIG_MD_FAULTY=m
...@@ -764,7 +772,7 @@ CONFIG_SERIAL_8250_RUNTIME_UARTS=4 ...@@ -764,7 +772,7 @@ CONFIG_SERIAL_8250_RUNTIME_UARTS=4
CONFIG_SERIAL_CORE=y CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y CONFIG_SERIAL_CORE_CONSOLE=y
CONFIG_SERIAL_ICOM=m CONFIG_SERIAL_ICOM=m
# CONFIG_SERIAL_JSM is not set CONFIG_SERIAL_JSM=m
CONFIG_UNIX98_PTYS=y CONFIG_UNIX98_PTYS=y
CONFIG_LEGACY_PTYS=y CONFIG_LEGACY_PTYS=y
CONFIG_LEGACY_PTY_COUNT=256 CONFIG_LEGACY_PTY_COUNT=256
...@@ -773,7 +781,7 @@ CONFIG_LEGACY_PTY_COUNT=256 ...@@ -773,7 +781,7 @@ CONFIG_LEGACY_PTY_COUNT=256
# CONFIG_TIPAR is not set # CONFIG_TIPAR is not set
CONFIG_HVC_DRIVER=y CONFIG_HVC_DRIVER=y
CONFIG_HVC_CONSOLE=y CONFIG_HVC_CONSOLE=y
# CONFIG_HVC_RTAS is not set CONFIG_HVC_RTAS=y
CONFIG_HVCS=m CONFIG_HVCS=m
# #
...@@ -1031,9 +1039,7 @@ CONFIG_USB_HIDDEV=y ...@@ -1031,9 +1039,7 @@ CONFIG_USB_HIDDEV=y
# CONFIG_USB_ACECAD is not set # CONFIG_USB_ACECAD is not set
# CONFIG_USB_KBTAB is not set # CONFIG_USB_KBTAB is not set
# CONFIG_USB_POWERMATE is not set # CONFIG_USB_POWERMATE is not set
# CONFIG_USB_MTOUCH is not set # CONFIG_USB_TOUCHSCREEN is not set
# CONFIG_USB_ITMTOUCH is not set
# CONFIG_USB_EGALAX is not set
# CONFIG_USB_YEALINK is not set # CONFIG_USB_YEALINK is not set
# CONFIG_USB_XPAD is not set # CONFIG_USB_XPAD is not set
# CONFIG_USB_ATI_REMOTE is not set # CONFIG_USB_ATI_REMOTE is not set
...@@ -1104,17 +1110,26 @@ CONFIG_USB_MON=y ...@@ -1104,17 +1110,26 @@ CONFIG_USB_MON=y
# #
# CONFIG_NEW_LEDS is not set # CONFIG_NEW_LEDS is not set
#
# LED drivers
#
#
# LED Triggers
#
# #
# InfiniBand support # InfiniBand support
# #
CONFIG_INFINIBAND=m CONFIG_INFINIBAND=m
# CONFIG_INFINIBAND_USER_MAD is not set CONFIG_INFINIBAND_USER_MAD=m
# CONFIG_INFINIBAND_USER_ACCESS is not set CONFIG_INFINIBAND_USER_ACCESS=m
CONFIG_INFINIBAND_MTHCA=m CONFIG_INFINIBAND_MTHCA=m
# CONFIG_INFINIBAND_MTHCA_DEBUG is not set CONFIG_INFINIBAND_MTHCA_DEBUG=y
CONFIG_INFINIBAND_IPOIB=m CONFIG_INFINIBAND_IPOIB=m
# CONFIG_INFINIBAND_IPOIB_DEBUG is not set CONFIG_INFINIBAND_IPOIB_DEBUG=y
# CONFIG_INFINIBAND_SRP is not set # CONFIG_INFINIBAND_IPOIB_DEBUG_DATA is not set
CONFIG_INFINIBAND_SRP=m
# #
# EDAC - error detection and reporting (RAS) (EXPERIMENTAL) # EDAC - error detection and reporting (RAS) (EXPERIMENTAL)
...@@ -1159,15 +1174,15 @@ CONFIG_XFS_EXPORT=y ...@@ -1159,15 +1174,15 @@ CONFIG_XFS_EXPORT=y
CONFIG_XFS_SECURITY=y CONFIG_XFS_SECURITY=y
CONFIG_XFS_POSIX_ACL=y CONFIG_XFS_POSIX_ACL=y
# CONFIG_XFS_RT is not set # CONFIG_XFS_RT is not set
# CONFIG_OCFS2_FS is not set CONFIG_OCFS2_FS=m
# CONFIG_MINIX_FS is not set # CONFIG_MINIX_FS is not set
# CONFIG_ROMFS_FS is not set # CONFIG_ROMFS_FS is not set
CONFIG_INOTIFY=y CONFIG_INOTIFY=y
# CONFIG_QUOTA is not set # CONFIG_QUOTA is not set
CONFIG_DNOTIFY=y CONFIG_DNOTIFY=y
CONFIG_AUTOFS_FS=m # CONFIG_AUTOFS_FS is not set
# CONFIG_AUTOFS4_FS is not set CONFIG_AUTOFS4_FS=m
# CONFIG_FUSE_FS is not set CONFIG_FUSE_FS=m
# #
# CD-ROM/DVD Filesystems # CD-ROM/DVD Filesystems
...@@ -1199,7 +1214,7 @@ CONFIG_TMPFS=y ...@@ -1199,7 +1214,7 @@ CONFIG_TMPFS=y
CONFIG_HUGETLBFS=y CONFIG_HUGETLBFS=y
CONFIG_HUGETLB_PAGE=y CONFIG_HUGETLB_PAGE=y
CONFIG_RAMFS=y CONFIG_RAMFS=y
# CONFIG_CONFIGFS_FS is not set CONFIG_CONFIGFS_FS=m
# #
# Miscellaneous filesystems # Miscellaneous filesystems
...@@ -1317,7 +1332,7 @@ CONFIG_ZLIB_DEFLATE=m ...@@ -1317,7 +1332,7 @@ CONFIG_ZLIB_DEFLATE=m
# #
CONFIG_PROFILING=y CONFIG_PROFILING=y
CONFIG_OPROFILE=y CONFIG_OPROFILE=y
# CONFIG_KPROBES is not set CONFIG_KPROBES=y
# #
# Kernel hacking # Kernel hacking
...@@ -1329,7 +1344,7 @@ CONFIG_LOG_BUF_SHIFT=17 ...@@ -1329,7 +1344,7 @@ CONFIG_LOG_BUF_SHIFT=17
CONFIG_DETECT_SOFTLOCKUP=y CONFIG_DETECT_SOFTLOCKUP=y
# CONFIG_SCHEDSTATS is not set # CONFIG_SCHEDSTATS is not set
# CONFIG_DEBUG_SLAB is not set # CONFIG_DEBUG_SLAB is not set
CONFIG_DEBUG_MUTEXES=y # CONFIG_DEBUG_MUTEXES is not set
# CONFIG_DEBUG_SPINLOCK is not set # CONFIG_DEBUG_SPINLOCK is not set
# CONFIG_DEBUG_SPINLOCK_SLEEP is not set # CONFIG_DEBUG_SPINLOCK_SLEEP is not set
# CONFIG_DEBUG_KOBJECT is not set # CONFIG_DEBUG_KOBJECT is not set
...@@ -1339,17 +1354,13 @@ CONFIG_DEBUG_FS=y ...@@ -1339,17 +1354,13 @@ CONFIG_DEBUG_FS=y
CONFIG_FORCED_INLINING=y CONFIG_FORCED_INLINING=y
# CONFIG_RCU_TORTURE_TEST is not set # CONFIG_RCU_TORTURE_TEST is not set
CONFIG_DEBUG_STACKOVERFLOW=y CONFIG_DEBUG_STACKOVERFLOW=y
CONFIG_DEBUG_STACK_USAGE=y # CONFIG_DEBUG_STACK_USAGE is not set
CONFIG_DEBUGGER=y CONFIG_DEBUGGER=y
CONFIG_XMON=y CONFIG_XMON=y
CONFIG_XMON_DEFAULT=y CONFIG_XMON_DEFAULT=y
CONFIG_IRQSTACKS=y CONFIG_IRQSTACKS=y
# CONFIG_BOOTX_TEXT is not set # CONFIG_BOOTX_TEXT is not set
# CONFIG_PPC_EARLY_DEBUG_LPAR is not set # CONFIG_PPC_EARLY_DEBUG is not set
# CONFIG_PPC_EARLY_DEBUG_G5 is not set
# CONFIG_PPC_EARLY_DEBUG_RTAS is not set
# CONFIG_PPC_EARLY_DEBUG_MAPLE is not set
# CONFIG_PPC_EARLY_DEBUG_ISERIES is not set
# #
# Security options # Security options
......
...@@ -35,17 +35,19 @@ struct aligninfo { ...@@ -35,17 +35,19 @@ struct aligninfo {
#define INVALID { 0, 0 } #define INVALID { 0, 0 }
/* Bits in the flags field */
#define LD 1 /* load */ #define LD 0 /* load */
#define ST 2 /* store */ #define ST 1 /* store */
#define SE 4 /* sign-extend value */ #define SE 2 /* sign-extend value */
#define F 8 /* to/from fp regs */ #define F 4 /* to/from fp regs */
#define U 0x10 /* update index register */ #define U 8 /* update index register */
#define M 0x20 /* multiple load/store */ #define M 0x10 /* multiple load/store */
#define SW 0x40 /* byte swap int or ... */ #define SW 0x20 /* byte swap */
#define S 0x40 /* ... single-precision fp */ #define S 0x40 /* single-precision fp or... */
#define SX 0x40 /* byte count in XER */ #define SX 0x40 /* ... byte count in XER */
#define HARD 0x80 /* string, stwcx. */ #define HARD 0x80 /* string, stwcx. */
/* DSISR bits reported for a DCBZ instruction: */
#define DCBZ 0x5f /* 8xx/82xx dcbz faults when cache not enabled */ #define DCBZ 0x5f /* 8xx/82xx dcbz faults when cache not enabled */
#define SWAP(a, b) (t = (a), (a) = (b), (b) = t) #define SWAP(a, b) (t = (a), (a) = (b), (b) = t)
...@@ -256,12 +258,16 @@ static int emulate_dcbz(struct pt_regs *regs, unsigned char __user *addr) ...@@ -256,12 +258,16 @@ static int emulate_dcbz(struct pt_regs *regs, unsigned char __user *addr)
#define REG_BYTE(rp, i) *((u8 *)(rp) + (i)) #define REG_BYTE(rp, i) *((u8 *)(rp) + (i))
#endif #endif
#define SWIZ_PTR(p) ((unsigned char __user *)((p) ^ swiz))
static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr, static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr,
unsigned int reg, unsigned int nb, unsigned int reg, unsigned int nb,
unsigned int flags, unsigned int instr) unsigned int flags, unsigned int instr,
unsigned long swiz)
{ {
unsigned long *rptr; unsigned long *rptr;
unsigned int nb0, i; unsigned int nb0, i, bswiz;
unsigned long p;
/* /*
* We do not try to emulate 8 bytes multiple as they aren't really * We do not try to emulate 8 bytes multiple as they aren't really
...@@ -280,9 +286,12 @@ static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr, ...@@ -280,9 +286,12 @@ static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr,
if (nb == 0) if (nb == 0)
return 1; return 1;
} else { } else {
if (__get_user(instr, unsigned long pc = regs->nip ^ (swiz & 4);
(unsigned int __user *)regs->nip))
if (__get_user(instr, (unsigned int __user *)pc))
return -EFAULT; return -EFAULT;
if (swiz == 0 && (flags & SW))
instr = cpu_to_le32(instr);
nb = (instr >> 11) & 0x1f; nb = (instr >> 11) & 0x1f;
if (nb == 0) if (nb == 0)
nb = 32; nb = 32;
...@@ -300,7 +309,10 @@ static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr, ...@@ -300,7 +309,10 @@ static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr,
return -EFAULT; /* bad address */ return -EFAULT; /* bad address */
rptr = &regs->gpr[reg]; rptr = &regs->gpr[reg];
if (flags & LD) { p = (unsigned long) addr;
bswiz = (flags & SW)? 3: 0;
if (!(flags & ST)) {
/* /*
* This zeroes the top 4 bytes of the affected registers * This zeroes the top 4 bytes of the affected registers
* in 64-bit mode, and also zeroes out any remaining * in 64-bit mode, and also zeroes out any remaining
...@@ -311,26 +323,28 @@ static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr, ...@@ -311,26 +323,28 @@ static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr,
memset(&regs->gpr[0], 0, memset(&regs->gpr[0], 0,
((nb0 + 3) / 4) * sizeof(unsigned long)); ((nb0 + 3) / 4) * sizeof(unsigned long));
for (i = 0; i < nb; ++i) for (i = 0; i < nb; ++i, ++p)
if (__get_user(REG_BYTE(rptr, i), addr + i)) if (__get_user(REG_BYTE(rptr, i ^ bswiz), SWIZ_PTR(p)))
return -EFAULT; return -EFAULT;
if (nb0 > 0) { if (nb0 > 0) {
rptr = &regs->gpr[0]; rptr = &regs->gpr[0];
addr += nb; addr += nb;
for (i = 0; i < nb0; ++i) for (i = 0; i < nb0; ++i, ++p)
if (__get_user(REG_BYTE(rptr, i), addr + i)) if (__get_user(REG_BYTE(rptr, i ^ bswiz),
SWIZ_PTR(p)))
return -EFAULT; return -EFAULT;
} }
} else { } else {
for (i = 0; i < nb; ++i) for (i = 0; i < nb; ++i, ++p)
if (__put_user(REG_BYTE(rptr, i), addr + i)) if (__put_user(REG_BYTE(rptr, i ^ bswiz), SWIZ_PTR(p)))
return -EFAULT; return -EFAULT;
if (nb0 > 0) { if (nb0 > 0) {
rptr = &regs->gpr[0]; rptr = &regs->gpr[0];
addr += nb; addr += nb;
for (i = 0; i < nb0; ++i) for (i = 0; i < nb0; ++i, ++p)
if (__put_user(REG_BYTE(rptr, i), addr + i)) if (__put_user(REG_BYTE(rptr, i ^ bswiz),
SWIZ_PTR(p)))
return -EFAULT; return -EFAULT;
} }
} }
...@@ -352,7 +366,7 @@ int fix_alignment(struct pt_regs *regs) ...@@ -352,7 +366,7 @@ int fix_alignment(struct pt_regs *regs)
unsigned int reg, areg; unsigned int reg, areg;
unsigned int dsisr; unsigned int dsisr;
unsigned char __user *addr; unsigned char __user *addr;
unsigned char __user *p; unsigned long p, swiz;
int ret, t; int ret, t;
union { union {
u64 ll; u64 ll;
...@@ -380,11 +394,15 @@ int fix_alignment(struct pt_regs *regs) ...@@ -380,11 +394,15 @@ int fix_alignment(struct pt_regs *regs)
* let's make one up from the instruction * let's make one up from the instruction
*/ */
if (cpu_has_feature(CPU_FTR_NODSISRALIGN)) { if (cpu_has_feature(CPU_FTR_NODSISRALIGN)) {
unsigned int real_instr; unsigned long pc = regs->nip;
if (unlikely(__get_user(real_instr,
(unsigned int __user *)regs->nip))) if (cpu_has_feature(CPU_FTR_PPC_LE) && (regs->msr & MSR_LE))
pc ^= 4;
if (unlikely(__get_user(instr, (unsigned int __user *)pc)))
return -EFAULT; return -EFAULT;
dsisr = make_dsisr(real_instr); if (cpu_has_feature(CPU_FTR_REAL_LE) && (regs->msr & MSR_LE))
instr = cpu_to_le32(instr);
dsisr = make_dsisr(instr);
} }
/* extract the operation and registers from the dsisr */ /* extract the operation and registers from the dsisr */
...@@ -397,6 +415,24 @@ int fix_alignment(struct pt_regs *regs) ...@@ -397,6 +415,24 @@ int fix_alignment(struct pt_regs *regs)
nb = aligninfo[instr].len; nb = aligninfo[instr].len;
flags = aligninfo[instr].flags; flags = aligninfo[instr].flags;
/* Byteswap little endian loads and stores */
swiz = 0;
if (regs->msr & MSR_LE) {
flags ^= SW;
/*
* So-called "PowerPC little endian" mode works by
* swizzling addresses rather than by actually doing
* any byte-swapping. To emulate this, we XOR each
* byte address with 7. We also byte-swap, because
* the processor's address swizzling depends on the
* operand size (it xors the address with 7 for bytes,
* 6 for halfwords, 4 for words, 0 for doublewords) but
* we will xor with 7 and load/store each byte separately.
*/
if (cpu_has_feature(CPU_FTR_PPC_LE))
swiz = 7;
}
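	/*
	 * Illustrative sketch only (hypothetical helper, not in the kernel
	 * source): the per-size address munging described in the comment
	 * above amounts to "ea ^ (8 - size)" -- xor with 7 for bytes, 6 for
	 * halfwords, 4 for words and 0 for doublewords.  The emulation code
	 * always xors with 7 and byte-swaps instead, which ends up with the
	 * same register contents.
	 */
	static inline unsigned long ppc_le_swizzle(unsigned long ea, int size)
	{
		return ea ^ (8 - size);	/* size is 1, 2, 4 or 8 */
	}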
/* DAR has the operand effective address */ /* DAR has the operand effective address */
addr = (unsigned char __user *)regs->dar; addr = (unsigned char __user *)regs->dar;
...@@ -412,7 +448,8 @@ int fix_alignment(struct pt_regs *regs) ...@@ -412,7 +448,8 @@ int fix_alignment(struct pt_regs *regs)
* function * function
*/ */
if (flags & M) if (flags & M)
return emulate_multiple(regs, addr, reg, nb, flags, instr); return emulate_multiple(regs, addr, reg, nb,
flags, instr, swiz);
/* Verify the address of the operand */ /* Verify the address of the operand */
if (unlikely(user_mode(regs) && if (unlikely(user_mode(regs) &&
...@@ -431,51 +468,71 @@ int fix_alignment(struct pt_regs *regs) ...@@ -431,51 +468,71 @@ int fix_alignment(struct pt_regs *regs)
/* If we are loading, get the data from user space, else /* If we are loading, get the data from user space, else
* get it from register values * get it from register values
*/ */
if (flags & LD) { if (!(flags & ST)) {
data.ll = 0; data.ll = 0;
ret = 0; ret = 0;
p = addr; p = (unsigned long) addr;
switch (nb) { switch (nb) {
case 8: case 8:
ret |= __get_user(data.v[0], p++); ret |= __get_user(data.v[0], SWIZ_PTR(p++));
ret |= __get_user(data.v[1], p++); ret |= __get_user(data.v[1], SWIZ_PTR(p++));
ret |= __get_user(data.v[2], p++); ret |= __get_user(data.v[2], SWIZ_PTR(p++));
ret |= __get_user(data.v[3], p++); ret |= __get_user(data.v[3], SWIZ_PTR(p++));
case 4: case 4:
ret |= __get_user(data.v[4], p++); ret |= __get_user(data.v[4], SWIZ_PTR(p++));
ret |= __get_user(data.v[5], p++); ret |= __get_user(data.v[5], SWIZ_PTR(p++));
case 2: case 2:
ret |= __get_user(data.v[6], p++); ret |= __get_user(data.v[6], SWIZ_PTR(p++));
ret |= __get_user(data.v[7], p++); ret |= __get_user(data.v[7], SWIZ_PTR(p++));
if (unlikely(ret)) if (unlikely(ret))
return -EFAULT; return -EFAULT;
} }
} else if (flags & F) } else if (flags & F) {
data.dd = current->thread.fpr[reg]; data.dd = current->thread.fpr[reg];
else if (flags & S) {
/* Single-precision FP store requires conversion... */
#ifdef CONFIG_PPC_FPU
preempt_disable();
enable_kernel_fp();
cvt_df(&data.dd, (float *)&data.v[4], &current->thread);
preempt_enable();
#else
return 0;
#endif
}
} else
data.ll = regs->gpr[reg]; data.ll = regs->gpr[reg];
/* Perform other misc operations like sign extension, byteswap, if (flags & SW) {
switch (nb) {
case 8:
SWAP(data.v[0], data.v[7]);
SWAP(data.v[1], data.v[6]);
SWAP(data.v[2], data.v[5]);
SWAP(data.v[3], data.v[4]);
break;
case 4:
SWAP(data.v[4], data.v[7]);
SWAP(data.v[5], data.v[6]);
break;
case 2:
SWAP(data.v[6], data.v[7]);
break;
}
}
/* Perform other misc operations like sign extension
* or floating point single precision conversion * or floating point single precision conversion
*/ */
switch (flags & ~U) { switch (flags & ~(U|SW)) {
case LD+SE: /* sign extend */ case LD+SE: /* sign extend */
if ( nb == 2 ) if ( nb == 2 )
data.ll = data.x16.low16; data.ll = data.x16.low16;
else /* nb must be 4 */ else /* nb must be 4 */
data.ll = data.x32.low32; data.ll = data.x32.low32;
break; break;
case LD+S: /* byte-swap */
case ST+S:
if (nb == 2) {
SWAP(data.v[6], data.v[7]);
} else {
SWAP(data.v[4], data.v[7]);
SWAP(data.v[5], data.v[6]);
}
break;
/* Single-precision FP load and store require conversions... */ /* Single-precision FP load requires conversion... */
case LD+F+S: case LD+F+S:
#ifdef CONFIG_PPC_FPU #ifdef CONFIG_PPC_FPU
preempt_disable(); preempt_disable();
...@@ -484,16 +541,6 @@ int fix_alignment(struct pt_regs *regs) ...@@ -484,16 +541,6 @@ int fix_alignment(struct pt_regs *regs)
preempt_enable(); preempt_enable();
#else #else
return 0; return 0;
#endif
break;
case ST+F+S:
#ifdef CONFIG_PPC_FPU
preempt_disable();
enable_kernel_fp();
cvt_df(&data.dd, (float *)&data.v[4], &current->thread);
preempt_enable();
#else
return 0;
#endif #endif
break; break;
} }
...@@ -501,19 +548,19 @@ int fix_alignment(struct pt_regs *regs) ...@@ -501,19 +548,19 @@ int fix_alignment(struct pt_regs *regs)
/* Store result to memory or update registers */ /* Store result to memory or update registers */
if (flags & ST) { if (flags & ST) {
ret = 0; ret = 0;
p = addr; p = (unsigned long) addr;
switch (nb) { switch (nb) {
case 8: case 8:
ret |= __put_user(data.v[0], p++); ret |= __put_user(data.v[0], SWIZ_PTR(p++));
ret |= __put_user(data.v[1], p++); ret |= __put_user(data.v[1], SWIZ_PTR(p++));
ret |= __put_user(data.v[2], p++); ret |= __put_user(data.v[2], SWIZ_PTR(p++));
ret |= __put_user(data.v[3], p++); ret |= __put_user(data.v[3], SWIZ_PTR(p++));
case 4: case 4:
ret |= __put_user(data.v[4], p++); ret |= __put_user(data.v[4], SWIZ_PTR(p++));
ret |= __put_user(data.v[5], p++); ret |= __put_user(data.v[5], SWIZ_PTR(p++));
case 2: case 2:
ret |= __put_user(data.v[6], p++); ret |= __put_user(data.v[6], SWIZ_PTR(p++));
ret |= __put_user(data.v[7], p++); ret |= __put_user(data.v[7], SWIZ_PTR(p++));
} }
if (unlikely(ret)) if (unlikely(ret))
return -EFAULT; return -EFAULT;
......
...@@ -122,9 +122,8 @@ int main(void) ...@@ -122,9 +122,8 @@ int main(void)
DEFINE(PACASLBCACHE, offsetof(struct paca_struct, slb_cache)); DEFINE(PACASLBCACHE, offsetof(struct paca_struct, slb_cache));
DEFINE(PACASLBCACHEPTR, offsetof(struct paca_struct, slb_cache_ptr)); DEFINE(PACASLBCACHEPTR, offsetof(struct paca_struct, slb_cache_ptr));
DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id)); DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id));
#ifdef CONFIG_PPC_64K_PAGES DEFINE(PACACONTEXTSLLP, offsetof(struct paca_struct, context.sllp));
DEFINE(PACAPGDIR, offsetof(struct paca_struct, pgdir)); DEFINE(PACAVMALLOCSLLP, offsetof(struct paca_struct, vmalloc_sllp));
#endif
#ifdef CONFIG_HUGETLB_PAGE #ifdef CONFIG_HUGETLB_PAGE
DEFINE(PACALOWHTLBAREAS, offsetof(struct paca_struct, context.low_htlb_areas)); DEFINE(PACALOWHTLBAREAS, offsetof(struct paca_struct, context.low_htlb_areas));
DEFINE(PACAHIGHHTLBAREAS, offsetof(struct paca_struct, context.high_htlb_areas)); DEFINE(PACAHIGHHTLBAREAS, offsetof(struct paca_struct, context.high_htlb_areas));
......
...@@ -210,9 +210,11 @@ setup_745x_specifics: ...@@ -210,9 +210,11 @@ setup_745x_specifics:
* the firmware. If any, we disable NAP capability as * the firmware. If any, we disable NAP capability as
* it's known to be bogus on rev 2.1 and earlier * it's known to be bogus on rev 2.1 and earlier
*/ */
BEGIN_FTR_SECTION
mfspr r11,SPRN_L3CR mfspr r11,SPRN_L3CR
andis. r11,r11,L3CR_L3E@h andis. r11,r11,L3CR_L3E@h
beq 1f beq 1f
END_FTR_SECTION_IFSET(CPU_FTR_L3CR)
lwz r6,CPU_SPEC_FEATURES(r5) lwz r6,CPU_SPEC_FEATURES(r5)
andi. r0,r6,CPU_FTR_L3_DISABLE_NAP andi. r0,r6,CPU_FTR_L3_DISABLE_NAP
beq 1f beq 1f
......
...@@ -73,23 +73,6 @@ _GLOBAL(__970_cpu_preinit) ...@@ -73,23 +73,6 @@ _GLOBAL(__970_cpu_preinit)
isync isync
blr blr
_GLOBAL(__setup_cpu_power4)
blr
_GLOBAL(__setup_cpu_be)
/* Set large page sizes LP=0: 16MB, LP=1: 64KB */
addi r3, 0, 0
ori r3, r3, HID6_LB
sldi r3, r3, 32
nor r3, r3, r3
mfspr r4, SPRN_HID6
and r4, r4, r3
addi r3, 0, 0x02000
sldi r3, r3, 32
or r4, r4, r3
mtspr SPRN_HID6, r4
blr
_GLOBAL(__setup_cpu_ppc970) _GLOBAL(__setup_cpu_ppc970)
mfspr r0,SPRN_HID0 mfspr r0,SPRN_HID0
li r11,5 /* clear DOZE and SLEEP */ li r11,5 /* clear DOZE and SLEEP */
......
This diff is collapsed.
...@@ -22,6 +22,7 @@ ...@@ -22,6 +22,7 @@
#include <linux/elf.h> #include <linux/elf.h>
#include <linux/elfcore.h> #include <linux/elfcore.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/irq.h>
#include <linux/types.h> #include <linux/types.h>
#include <asm/processor.h> #include <asm/processor.h>
...@@ -174,6 +175,8 @@ static void crash_kexec_prepare_cpus(void) ...@@ -174,6 +175,8 @@ static void crash_kexec_prepare_cpus(void)
void default_machine_crash_shutdown(struct pt_regs *regs) void default_machine_crash_shutdown(struct pt_regs *regs)
{ {
unsigned int irq;
/* /*
* This function is only called after the system * This function is only called after the system
* has paniced or is otherwise in a critical state. * has paniced or is otherwise in a critical state.
...@@ -186,6 +189,16 @@ void default_machine_crash_shutdown(struct pt_regs *regs) ...@@ -186,6 +189,16 @@ void default_machine_crash_shutdown(struct pt_regs *regs)
*/ */
local_irq_disable(); local_irq_disable();
for_each_irq(irq) {
struct irq_desc *desc = irq_descp(irq);
if (desc->status & IRQ_INPROGRESS)
desc->handler->end(irq);
if (!(desc->status & IRQ_DISABLED))
desc->handler->disable(irq);
}
if (ppc_md.kexec_cpu_down) if (ppc_md.kexec_cpu_down)
ppc_md.kexec_cpu_down(1, 0); ppc_md.kexec_cpu_down(1, 0);
......
...@@ -25,6 +25,11 @@ ...@@ -25,6 +25,11 @@
#define DBG(fmt...) #define DBG(fmt...)
#endif #endif
void reserve_kdump_trampoline(void)
{
lmb_reserve(0, KDUMP_RESERVE_LIMIT);
}
static void __init create_trampoline(unsigned long addr) static void __init create_trampoline(unsigned long addr)
{ {
/* The maximum range of a single instruction branch, is the current /* The maximum range of a single instruction branch, is the current
...@@ -39,11 +44,11 @@ static void __init create_trampoline(unsigned long addr) ...@@ -39,11 +44,11 @@ static void __init create_trampoline(unsigned long addr)
create_branch(addr + 4, addr + PHYSICAL_START, 0); create_branch(addr + 4, addr + PHYSICAL_START, 0);
} }
void __init kdump_setup(void) void __init setup_kdump_trampoline(void)
{ {
unsigned long i; unsigned long i;
DBG(" -> kdump_setup()\n"); DBG(" -> setup_kdump_trampoline()\n");
for (i = KDUMP_TRAMPOLINE_START; i < KDUMP_TRAMPOLINE_END; i += 8) { for (i = KDUMP_TRAMPOLINE_START; i < KDUMP_TRAMPOLINE_END; i += 8) {
create_trampoline(i); create_trampoline(i);
...@@ -52,7 +57,7 @@ void __init kdump_setup(void) ...@@ -52,7 +57,7 @@ void __init kdump_setup(void)
create_trampoline(__pa(system_reset_fwnmi) - PHYSICAL_START); create_trampoline(__pa(system_reset_fwnmi) - PHYSICAL_START);
create_trampoline(__pa(machine_check_fwnmi) - PHYSICAL_START); create_trampoline(__pa(machine_check_fwnmi) - PHYSICAL_START);
DBG(" <- kdump_setup()\n"); DBG(" <- setup_kdump_trampoline()\n");
} }
#ifdef CONFIG_PROC_VMCORE #ifdef CONFIG_PROC_VMCORE
......
...@@ -57,6 +57,7 @@ system_call_common: ...@@ -57,6 +57,7 @@ system_call_common:
beq- 1f beq- 1f
ld r1,PACAKSAVE(r13) ld r1,PACAKSAVE(r13)
1: std r10,0(r1) 1: std r10,0(r1)
crclr so
std r11,_NIP(r1) std r11,_NIP(r1)
std r12,_MSR(r1) std r12,_MSR(r1)
std r0,GPR0(r1) std r0,GPR0(r1)
...@@ -75,7 +76,6 @@ system_call_common: ...@@ -75,7 +76,6 @@ system_call_common:
std r11,GPR11(r1) std r11,GPR11(r1)
std r11,GPR12(r1) std r11,GPR12(r1)
std r9,GPR13(r1) std r9,GPR13(r1)
crclr so
mfcr r9 mfcr r9
mflr r10 mflr r10
li r11,0xc01 li r11,0xc01
......
...@@ -72,7 +72,7 @@ _GLOBAL(load_up_fpu) ...@@ -72,7 +72,7 @@ _GLOBAL(load_up_fpu)
std r12,_MSR(r1) std r12,_MSR(r1)
#endif #endif
lfd fr0,THREAD_FPSCR(r5) lfd fr0,THREAD_FPSCR(r5)
mtfsf 0xff,fr0 MTFSF_L(fr0)
REST_32FPRS(0, r5) REST_32FPRS(0, r5)
#ifndef CONFIG_SMP #ifndef CONFIG_SMP
subi r4,r5,THREAD subi r4,r5,THREAD
...@@ -127,7 +127,7 @@ _GLOBAL(giveup_fpu) ...@@ -127,7 +127,7 @@ _GLOBAL(giveup_fpu)
_GLOBAL(cvt_fd) _GLOBAL(cvt_fd)
lfd 0,THREAD_FPSCR(r5) /* load up fpscr value */ lfd 0,THREAD_FPSCR(r5) /* load up fpscr value */
mtfsf 0xff,0 MTFSF_L(0)
lfs 0,0(r3) lfs 0,0(r3)
stfd 0,0(r4) stfd 0,0(r4)
mffs 0 mffs 0
...@@ -136,7 +136,7 @@ _GLOBAL(cvt_fd) ...@@ -136,7 +136,7 @@ _GLOBAL(cvt_fd)
_GLOBAL(cvt_df) _GLOBAL(cvt_df)
lfd 0,THREAD_FPSCR(r5) /* load up fpscr value */ lfd 0,THREAD_FPSCR(r5) /* load up fpscr value */
mtfsf 0xff,0 MTFSF_L(0)
lfd 0,0(r3) lfd 0,0(r3)
stfs 0,0(r4) stfs 0,0(r4)
mffs 0 mffs 0
......
...@@ -973,6 +973,13 @@ __secondary_start_gemini: ...@@ -973,6 +973,13 @@ __secondary_start_gemini:
b __secondary_start b __secondary_start
#endif /* CONFIG_GEMINI */ #endif /* CONFIG_GEMINI */
.globl __secondary_start_mpc86xx
__secondary_start_mpc86xx:
mfspr r3, SPRN_PIR
stw r3, __secondary_hold_acknowledge@l(0)
mr r24, r3 /* cpu # */
b __secondary_start
.globl __secondary_start_pmac_0 .globl __secondary_start_pmac_0
__secondary_start_pmac_0: __secondary_start_pmac_0:
/* NB the entries for cpus 0, 1, 2 must each occupy 8 bytes. */ /* NB the entries for cpus 0, 1, 2 must each occupy 8 bytes. */
...@@ -1088,7 +1095,12 @@ load_up_mmu: ...@@ -1088,7 +1095,12 @@ load_up_mmu:
LOAD_BAT(1,r3,r4,r5) LOAD_BAT(1,r3,r4,r5)
LOAD_BAT(2,r3,r4,r5) LOAD_BAT(2,r3,r4,r5)
LOAD_BAT(3,r3,r4,r5) LOAD_BAT(3,r3,r4,r5)
BEGIN_FTR_SECTION
LOAD_BAT(4,r3,r4,r5)
LOAD_BAT(5,r3,r4,r5)
LOAD_BAT(6,r3,r4,r5)
LOAD_BAT(7,r3,r4,r5)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_HIGH_BATS)
blr blr
/* /*
......
...@@ -316,6 +316,21 @@ label##_pSeries: \ ...@@ -316,6 +316,21 @@ label##_pSeries: \
mtspr SPRN_SPRG1,r13; /* save r13 */ \ mtspr SPRN_SPRG1,r13; /* save r13 */ \
EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common) EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common)
#define HSTD_EXCEPTION_PSERIES(n, label) \
. = n; \
.globl label##_pSeries; \
label##_pSeries: \
HMT_MEDIUM; \
mtspr SPRN_SPRG1,r20; /* save r20 */ \
mfspr r20,SPRN_HSRR0; /* copy HSRR0 to SRR0 */ \
mtspr SPRN_SRR0,r20; \
mfspr r20,SPRN_HSRR1; /* copy HSRR0 to SRR0 */ \
mtspr SPRN_SRR1,r20; \
mfspr r20,SPRN_SPRG1; /* restore r20 */ \
mtspr SPRN_SPRG1,r13; /* save r13 */ \
EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common)
#define STD_EXCEPTION_ISERIES(n, label, area) \ #define STD_EXCEPTION_ISERIES(n, label, area) \
.globl label##_iSeries; \ .globl label##_iSeries; \
label##_iSeries: \ label##_iSeries: \
...@@ -544,8 +559,17 @@ system_call_pSeries: ...@@ -544,8 +559,17 @@ system_call_pSeries:
STD_EXCEPTION_PSERIES(0xf20, altivec_unavailable) STD_EXCEPTION_PSERIES(0xf20, altivec_unavailable)
#ifdef CONFIG_CBE_RAS
HSTD_EXCEPTION_PSERIES(0x1200, cbe_system_error)
#endif /* CONFIG_CBE_RAS */
STD_EXCEPTION_PSERIES(0x1300, instruction_breakpoint) STD_EXCEPTION_PSERIES(0x1300, instruction_breakpoint)
#ifdef CONFIG_CBE_RAS
HSTD_EXCEPTION_PSERIES(0x1600, cbe_maintenance)
#endif /* CONFIG_CBE_RAS */
STD_EXCEPTION_PSERIES(0x1700, altivec_assist) STD_EXCEPTION_PSERIES(0x1700, altivec_assist)
#ifdef CONFIG_CBE_RAS
HSTD_EXCEPTION_PSERIES(0x1800, cbe_thermal)
#endif /* CONFIG_CBE_RAS */
. = 0x3000 . = 0x3000
...@@ -827,6 +851,11 @@ machine_check_common: ...@@ -827,6 +851,11 @@ machine_check_common:
#else #else
STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception) STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception)
#endif #endif
#ifdef CONFIG_CBE_RAS
STD_EXCEPTION_COMMON(0x1200, cbe_system_error, .cbe_system_error_exception)
STD_EXCEPTION_COMMON(0x1600, cbe_maintenance, .cbe_maintenance_exception)
STD_EXCEPTION_COMMON(0x1800, cbe_thermal, .cbe_thermal_exception)
#endif /* CONFIG_CBE_RAS */
/* /*
* Here we have detected that the kernel stack pointer is bad. * Here we have detected that the kernel stack pointer is bad.
......
...@@ -106,8 +106,6 @@ EXPORT_SYMBOL(iowrite32_rep); ...@@ -106,8 +106,6 @@ EXPORT_SYMBOL(iowrite32_rep);
void __iomem *ioport_map(unsigned long port, unsigned int len) void __iomem *ioport_map(unsigned long port, unsigned int len)
{ {
if (!_IO_IS_VALID(port))
return NULL;
return (void __iomem *) (port+pci_io_base); return (void __iomem *) (port+pci_io_base);
} }
......
...@@ -418,10 +418,11 @@ void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist, ...@@ -418,10 +418,11 @@ void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
* Build a iommu_table structure. This contains a bit map which * Build a iommu_table structure. This contains a bit map which
* is used to manage allocation of the tce space. * is used to manage allocation of the tce space.
*/ */
struct iommu_table *iommu_init_table(struct iommu_table *tbl) struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
{ {
unsigned long sz; unsigned long sz;
static int welcomed = 0; static int welcomed = 0;
struct page *page;
/* Set aside 1/4 of the table for large allocations. */ /* Set aside 1/4 of the table for large allocations. */
tbl->it_halfpoint = tbl->it_size * 3 / 4; tbl->it_halfpoint = tbl->it_size * 3 / 4;
...@@ -429,10 +430,10 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl) ...@@ -429,10 +430,10 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl)
/* number of bytes needed for the bitmap */ /* number of bytes needed for the bitmap */
sz = (tbl->it_size + 7) >> 3; sz = (tbl->it_size + 7) >> 3;
tbl->it_map = (unsigned long *)__get_free_pages(GFP_ATOMIC, get_order(sz)); page = alloc_pages_node(nid, GFP_ATOMIC, get_order(sz));
if (!tbl->it_map) if (!page)
panic("iommu_init_table: Can't allocate %ld bytes\n", sz); panic("iommu_init_table: Can't allocate %ld bytes\n", sz);
tbl->it_map = page_address(page);
memset(tbl->it_map, 0, sz); memset(tbl->it_map, 0, sz);
tbl->it_hint = 0; tbl->it_hint = 0;
...@@ -536,11 +537,12 @@ void iommu_unmap_single(struct iommu_table *tbl, dma_addr_t dma_handle, ...@@ -536,11 +537,12 @@ void iommu_unmap_single(struct iommu_table *tbl, dma_addr_t dma_handle,
* to the dma address (mapping) of the first page. * to the dma address (mapping) of the first page.
*/ */
void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size, void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
dma_addr_t *dma_handle, unsigned long mask, gfp_t flag) dma_addr_t *dma_handle, unsigned long mask, gfp_t flag, int node)
{ {
void *ret = NULL; void *ret = NULL;
dma_addr_t mapping; dma_addr_t mapping;
unsigned int npages, order; unsigned int npages, order;
struct page *page;
size = PAGE_ALIGN(size); size = PAGE_ALIGN(size);
npages = size >> PAGE_SHIFT; npages = size >> PAGE_SHIFT;
...@@ -560,9 +562,10 @@ void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size, ...@@ -560,9 +562,10 @@ void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
return NULL; return NULL;
/* Alloc enough pages (and possibly more) */ /* Alloc enough pages (and possibly more) */
ret = (void *)__get_free_pages(flag, order); page = alloc_pages_node(node, flag, order);
if (!ret) if (!page)
return NULL; return NULL;
ret = page_address(page);
memset(ret, 0, size); memset(ret, 0, size);
/* Set up tces to cover the allocated range */ /* Set up tces to cover the allocated range */
...@@ -570,8 +573,8 @@ void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size, ...@@ -570,8 +573,8 @@ void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
mask >> PAGE_SHIFT, order); mask >> PAGE_SHIFT, order);
if (mapping == DMA_ERROR_CODE) { if (mapping == DMA_ERROR_CODE) {
free_pages((unsigned long)ret, order); free_pages((unsigned long)ret, order);
ret = NULL; return NULL;
} else }
*dma_handle = mapping; *dma_handle = mapping;
return ret; return ret;
} }
......
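The new nid/node parameters let the TCE table bitmap and coherent allocations come from the NUMA node that owns the bridge. A minimal sketch of what a call site could look like, assuming the caller holds the bridge's device-tree node dn and uses the of_node_to_nid() helper that appears later in this merge (the names here are illustrative, not the actual platform call sites):

	/* illustrative only: allocate the table's bitmap near the bridge */
	int nid = of_node_to_nid(dn);	/* dn: the PHB's device-tree node */

	tbl = iommu_init_table(tbl, nid);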
...@@ -47,6 +47,7 @@ ...@@ -47,6 +47,7 @@
#include <linux/cpumask.h> #include <linux/cpumask.h>
#include <linux/profile.h> #include <linux/profile.h>
#include <linux/bitops.h> #include <linux/bitops.h>
#include <linux/pci.h>
#include <asm/uaccess.h> #include <asm/uaccess.h>
#include <asm/system.h> #include <asm/system.h>
...@@ -436,6 +437,30 @@ void do_softirq(void) ...@@ -436,6 +437,30 @@ void do_softirq(void)
} }
EXPORT_SYMBOL(do_softirq); EXPORT_SYMBOL(do_softirq);
#ifdef CONFIG_PCI_MSI
int pci_enable_msi(struct pci_dev * pdev)
{
if (ppc_md.enable_msi)
return ppc_md.enable_msi(pdev);
else
return -1;
}
void pci_disable_msi(struct pci_dev * pdev)
{
if (ppc_md.disable_msi)
ppc_md.disable_msi(pdev);
}
void pci_scan_msi_device(struct pci_dev *dev) {}
int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec) {return -1;}
void pci_disable_msix(struct pci_dev *dev) {}
void msi_remove_pci_irq_vectors(struct pci_dev *dev) {}
void disable_msi_mode(struct pci_dev *dev, int pos, int type) {}
void pci_no_msi(void) {}
#endif
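These wrappers only defer to per-platform hooks, so a platform that supports MSI just fills in the two machdep entries. A sketch, with hypothetical function names, of how that wiring might look in platform setup code:

	/* hypothetical platform code, for illustration only */
	static int myplat_enable_msi(struct pci_dev *pdev)
	{
		/* program the platform interrupt controller for pdev */
		return 0;
	}

	static void myplat_disable_msi(struct pci_dev *pdev)
	{
		/* tear the MSI vector(s) back down */
	}

	ppc_md.enable_msi  = myplat_enable_msi;
	ppc_md.disable_msi = myplat_disable_msi;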
#ifdef CONFIG_PPC64 #ifdef CONFIG_PPC64
static int __init setup_noirqdistrib(char *str) static int __init setup_noirqdistrib(char *str)
{ {
......
...@@ -521,10 +521,10 @@ static ssize_t lparcfg_write(struct file *file, const char __user * buf, ...@@ -521,10 +521,10 @@ static ssize_t lparcfg_write(struct file *file, const char __user * buf,
current_weight = (resource >> 5 * 8) & 0xFF; current_weight = (resource >> 5 * 8) & 0xFF;
pr_debug("%s: current_entitled = %lu, current_weight = %lu\n", pr_debug("%s: current_entitled = %lu, current_weight = %u\n",
__FUNCTION__, current_entitled, current_weight); __FUNCTION__, current_entitled, current_weight);
pr_debug("%s: new_entitled = %lu, new_weight = %lu\n", pr_debug("%s: new_entitled = %lu, new_weight = %u\n",
__FUNCTION__, *new_entitled_ptr, *new_weight_ptr); __FUNCTION__, *new_entitled_ptr, *new_weight_ptr);
retval = plpar_hcall_norets(H_SET_PPP, *new_entitled_ptr, retval = plpar_hcall_norets(H_SET_PPP, *new_entitled_ptr,
......
...@@ -21,6 +21,7 @@ ...@@ -21,6 +21,7 @@
#include <asm/machdep.h> #include <asm/machdep.h>
#include <asm/cacheflush.h> #include <asm/cacheflush.h>
#include <asm/paca.h> #include <asm/paca.h>
#include <asm/lmb.h>
#include <asm/mmu.h> #include <asm/mmu.h>
#include <asm/sections.h> /* _end */ #include <asm/sections.h> /* _end */
#include <asm/prom.h> #include <asm/prom.h>
...@@ -335,7 +336,105 @@ static void __init export_htab_values(void) ...@@ -335,7 +336,105 @@ static void __init export_htab_values(void)
of_node_put(node); of_node_put(node);
} }
static struct property crashk_base_prop = {
.name = "linux,crashkernel-base",
.length = sizeof(unsigned long),
.value = (unsigned char *)&crashk_res.start,
};
static unsigned long crashk_size;
static struct property crashk_size_prop = {
.name = "linux,crashkernel-size",
.length = sizeof(unsigned long),
.value = (unsigned char *)&crashk_size,
};
static void __init export_crashk_values(void)
{
struct device_node *node;
struct property *prop;
node = of_find_node_by_path("/chosen");
if (!node)
return;
/* There might be existing crash kernel properties, but we can't
* be sure what's in them, so remove them. */
prop = of_find_property(node, "linux,crashkernel-base", NULL);
if (prop)
prom_remove_property(node, prop);
prop = of_find_property(node, "linux,crashkernel-size", NULL);
if (prop)
prom_remove_property(node, prop);
if (crashk_res.start != 0) {
prom_add_property(node, &crashk_base_prop);
crashk_size = crashk_res.end - crashk_res.start + 1;
prom_add_property(node, &crashk_size_prop);
}
of_node_put(node);
}
void __init kexec_setup(void) void __init kexec_setup(void)
{ {
export_htab_values(); export_htab_values();
export_crashk_values();
}
static int __init early_parse_crashk(char *p)
{
unsigned long size;
if (!p)
return 1;
size = memparse(p, &p);
if (*p == '@')
crashk_res.start = memparse(p + 1, &p);
else
crashk_res.start = KDUMP_KERNELBASE;
crashk_res.end = crashk_res.start + size - 1;
return 0;
}
early_param("crashkernel", early_parse_crashk);
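As parsed above, the option takes the usual size[@base] form, with memparse() handling K/M/G suffixes: booting with crashkernel=128M@32M asks for a 128 MB region starting at 32 MB, while a bare crashkernel=128M leaves the base at KDUMP_KERNELBASE. reserve_crashkernel() below then re-checks the base (warning and forcing KDUMP_KERNELBASE if it differs), page-aligns the size, bumps the memory limit if needed and reserves the region in the LMB allocator.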
void __init reserve_crashkernel(void)
{
unsigned long size;
if (crashk_res.start == 0)
return;
/* We might have got these values via the command line or the
* device tree, either way sanitise them now. */
size = crashk_res.end - crashk_res.start + 1;
if (crashk_res.start != KDUMP_KERNELBASE)
printk("Crash kernel location must be 0x%x\n",
KDUMP_KERNELBASE);
crashk_res.start = KDUMP_KERNELBASE;
size = PAGE_ALIGN(size);
crashk_res.end = crashk_res.start + size - 1;
/* Crash kernel trumps memory limit */
if (memory_limit && memory_limit <= crashk_res.end) {
memory_limit = crashk_res.end + 1;
printk("Adjusted memory limit for crashkernel, now 0x%lx\n",
memory_limit);
}
lmb_reserve(crashk_res.start, size);
}
int overlaps_crashkernel(unsigned long start, unsigned long size)
{
return (start + size) > crashk_res.start && start <= crashk_res.end;
} }
...@@ -216,7 +216,7 @@ _GLOBAL(call_setup_cpu) ...@@ -216,7 +216,7 @@ _GLOBAL(call_setup_cpu)
lwz r4,0(r4) lwz r4,0(r4)
add r4,r4,r3 add r4,r4,r3
lwz r5,CPU_SPEC_SETUP(r4) lwz r5,CPU_SPEC_SETUP(r4)
cmpi 0,r5,0 cmpwi 0,r5,0
add r5,r5,r3 add r5,r5,r3
beqlr beqlr
mtctr r5 mtctr r5
......
...@@ -482,7 +482,9 @@ _GLOBAL(identify_cpu) ...@@ -482,7 +482,9 @@ _GLOBAL(identify_cpu)
sub r0,r3,r5 sub r0,r3,r5
std r0,0(r4) std r0,0(r4)
ld r4,CPU_SPEC_SETUP(r3) ld r4,CPU_SPEC_SETUP(r3)
cmpdi 0,r4,0
add r4,r4,r5 add r4,r4,r5
beqlr
ld r4,0(r4) ld r4,0(r4)
add r4,r4,r5 add r4,r4,r5
mtctr r4 mtctr r4
...@@ -768,9 +770,6 @@ _GLOBAL(giveup_altivec) ...@@ -768,9 +770,6 @@ _GLOBAL(giveup_altivec)
#endif /* CONFIG_ALTIVEC */ #endif /* CONFIG_ALTIVEC */
_GLOBAL(__setup_cpu_power3)
blr
_GLOBAL(execve) _GLOBAL(execve)
li r0,__NR_execve li r0,__NR_execve
sc sc
......
...@@ -204,7 +204,7 @@ static void nvram_print_partitions(char * label)
printk(KERN_WARNING "indx\t\tsig\tchks\tlen\tname\n");
list_for_each(p, &nvram_part->partition) {
tmp_part = list_entry(p, struct nvram_partition, partition);
printk(KERN_WARNING "%d \t%02x\t%02x\t%d\t%s\n",
printk(KERN_WARNING "%4d \t%02x\t%02x\t%d\t%s\n",
tmp_part->index, tmp_part->header.signature,
tmp_part->header.checksum, tmp_part->header.length,
tmp_part->header.name);
...
...@@ -1113,9 +1113,10 @@ check_for_io_childs(struct pci_bus *bus, struct resource* res, int *found_vga)
int i;
int rc = 0;
#define push_end(res, size) do { unsigned long __sz = (size) ; \
res->end = ((res->end + __sz) / (__sz + 1)) * (__sz + 1) + __sz; \
} while (0)
#define push_end(res, mask) do { \
BUG_ON((mask+1) & mask); \
res->end = (res->end + mask) | mask; \
} while (0)
list_for_each_entry(dev, &bus->devices, bus_list) {
u16 class = dev->class >> 8;
...
...@@ -42,14 +42,6 @@
unsigned long pci_probe_only = 1;
int pci_assign_all_buses = 0;
/*
* legal IO pages under MAX_ISA_PORT. This is to ensure we don't touch
* devices we don't have access to.
*/
unsigned long io_page_mask;
EXPORT_SYMBOL(io_page_mask);
#ifdef CONFIG_PPC_MULTIPLATFORM
static void fixup_resource(struct resource *res, struct pci_dev *dev);
static void do_bus_setup(struct pci_bus *bus);
...@@ -235,8 +227,10 @@ struct pci_controller * pcibios_alloc_controller(struct device_node *dev)
pci_setup_pci_controller(phb);
phb->arch_data = dev;
phb->is_dynamic = mem_init_done;
if (dev)
if (dev) {
PHB_SET_NODE(phb, of_node_to_nid(dev));
add_linux_pci_domain(dev, phb);
}
return phb;
}
...@@ -396,7 +390,7 @@ struct pci_dev *of_create_pci_dev(struct device_node *node,
dev->current_state = 4; /* unknown power state */
if (!strcmp(type, "pci")) {
if (!strcmp(type, "pci") || !strcmp(type, "pciex")) {
/* a PCI-PCI bridge */
dev->hdr_type = PCI_HEADER_TYPE_BRIDGE;
dev->rom_base_reg = PCI_ROM_ADDRESS1;
...@@ -605,7 +599,7 @@ static int __init pcibios_init(void)
iSeries_pcibios_init();
#endif
printk("PCI: Probing PCI hardware\n");
printk(KERN_DEBUG "PCI: Probing PCI hardware\n");
/* Scan all of the recorded PCI controllers. */
list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
...@@ -630,14 +624,14 @@ static int __init pcibios_init(void)
/* Cache the location of the ISA bridge (if we have one) */
ppc64_isabridge_dev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
if (ppc64_isabridge_dev != NULL)
printk("ISA bridge at %s\n", pci_name(ppc64_isabridge_dev));
printk(KERN_DEBUG "ISA bridge at %s\n", pci_name(ppc64_isabridge_dev));
#ifdef CONFIG_PPC_MULTIPLATFORM
/* map in PCI I/O space */
phbs_remap_io();
#endif
printk("PCI: Probing PCI hardware done\n");
printk(KERN_DEBUG "PCI: Probing PCI hardware done\n");
return 0;
}
...@@ -804,7 +798,7 @@ static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp,
else
prot |= _PAGE_GUARDED;
printk("PCI map for %s:%lx, prot: %lx\n", pci_name(dev), rp->start,
printk(KERN_DEBUG "PCI map for %s:%lx, prot: %lx\n", pci_name(dev), rp->start,
prot);
return __pgprot(prot);
...@@ -894,8 +888,8 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
return ret;
}
#ifdef CONFIG_PPC_MULTIPLATFORM
static ssize_t pci_show_devspec(struct device *dev, struct device_attribute *attr, char *buf)
static ssize_t pci_show_devspec(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct pci_dev *pdev;
struct device_node *np;
...@@ -907,13 +901,10 @@ static ssize_t pci_show_devspec(struct device *dev, struct device_attribute *att
return sprintf(buf, "%s", np->full_name);
}
static DEVICE_ATTR(devspec, S_IRUGO, pci_show_devspec, NULL);
#endif /* CONFIG_PPC_MULTIPLATFORM */
void pcibios_add_platform_entries(struct pci_dev *pdev)
{
#ifdef CONFIG_PPC_MULTIPLATFORM
device_create_file(&pdev->dev, &dev_attr_devspec);
#endif /* CONFIG_PPC_MULTIPLATFORM */
}
#ifdef CONFIG_PPC_MULTIPLATFORM
...@@ -1104,8 +1095,6 @@ void __init pci_setup_phb_io(struct pci_controller *hose, int primary)
pci_process_ISA_OF_ranges(isa_dn, hose->io_base_phys,
hose->io_base_virt);
of_node_put(isa_dn);
/* Allow all IO */
io_page_mask = -1;
}
}
...@@ -1212,7 +1201,7 @@ int remap_bus_range(struct pci_bus *bus)
return 1;
if (start_phys == 0)
return 1;
printk("mapping IO %lx -> %lx, size: %lx\n", start_phys, start_virt, size);
printk(KERN_DEBUG "mapping IO %lx -> %lx, size: %lx\n", start_phys, start_virt, size);
if (__ioremap_explicit(start_phys, start_virt, size,
_PAGE_NO_CACHE | _PAGE_GUARDED))
return 1;
...@@ -1232,27 +1221,13 @@ static void phbs_remap_io(void)
static void __devinit fixup_resource(struct resource *res, struct pci_dev *dev)
{
struct pci_controller *hose = pci_bus_to_host(dev->bus);
unsigned long start, end, mask, offset;
unsigned long offset;
if (res->flags & IORESOURCE_IO) {
offset = (unsigned long)hose->io_base_virt - pci_io_base;
start = res->start += offset;
end = res->end += offset;
res->start += offset;
res->end += offset;
/* Need to allow IO access to pages that are in the
ISA range */
if (start < MAX_ISA_PORT) {
if (end > MAX_ISA_PORT)
end = MAX_ISA_PORT;
start >>= PAGE_SHIFT;
end >>= PAGE_SHIFT;
/* get the range of pages for the map */
mask = ((1 << (end+1)) - 1) ^ ((1 << start) - 1);
io_page_mask |= mask;
}
} else if (res->flags & IORESOURCE_MEM) {
res->start += hose->pci_mem_offset;
res->end += hose->pci_mem_offset;
...@@ -1442,3 +1417,12 @@ long sys_pciconfig_iobase(long which, unsigned long in_bus,
return -EOPNOTSUPP;
}
#ifdef CONFIG_NUMA
int pcibus_to_node(struct pci_bus *bus)
{
struct pci_controller *phb = pci_bus_to_host(bus);
return phb->node;
}
EXPORT_SYMBOL(pcibus_to_node);
#endif
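A hedged usage sketch for the new export: a driver could use pcibus_to_node() to keep its buffers on the PHB's NUMA node. The function and buffer below are made-up examples, not part of this patch.

#include <linux/pci.h>
#include <linux/slab.h>

static void *example_node_local_buffer(struct pci_dev *pdev, size_t size)
{
	int node = pcibus_to_node(pdev->bus);

	/* of_node_to_nid() may not have found a node; fall back to any node. */
	if (node < 0)
		return kmalloc(size, GFP_KERNEL);

	return kmalloc_node(size, GFP_KERNEL, node);
}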
...@@ -82,13 +82,17 @@ static int pci_direct_dma_supported(struct device *dev, u64 mask)
return mask < 0x100000000ull;
}
static struct dma_mapping_ops pci_direct_ops = {
.alloc_coherent = pci_direct_alloc_coherent,
.free_coherent = pci_direct_free_coherent,
.map_single = pci_direct_map_single,
.unmap_single = pci_direct_unmap_single,
.map_sg = pci_direct_map_sg,
.unmap_sg = pci_direct_unmap_sg,
.dma_supported = pci_direct_dma_supported,
};
void __init pci_direct_iommu_init(void)
{
pci_dma_ops.alloc_coherent = pci_direct_alloc_coherent;
pci_dma_ops.free_coherent = pci_direct_free_coherent;
pci_dma_ops.map_single = pci_direct_map_single;
pci_dma_ops.unmap_single = pci_direct_unmap_single;
pci_dma_ops.map_sg = pci_direct_map_sg;
pci_dma_ops.unmap_sg = pci_direct_unmap_sg;
pci_dma_ops.dma_supported = pci_direct_dma_supported;
pci_dma_ops = pci_direct_ops;
}
...@@ -31,6 +31,7 @@
#include <asm/pci-bridge.h>
#include <asm/pSeries_reconfig.h>
#include <asm/ppc-pci.h>
#include <asm/firmware.h>
/*
* Traverse_func that inits the PCI fields of the device node.
...@@ -59,6 +60,11 @@ static void * __devinit update_dn_pci_info(struct device_node *dn, void *data)
pdn->busno = (regs[0] >> 16) & 0xff;
pdn->devfn = (regs[0] >> 8) & 0xff;
}
if (firmware_has_feature(FW_FEATURE_ISERIES)) {
u32 *busp = (u32 *)get_property(dn, "linux,subbus", NULL);
if (busp)
pdn->bussubno = *busp;
}
pdn->pci_ext_config_space = (type && *type == 1);
return NULL;
...
...@@ -44,16 +44,16 @@
*/
#define PCI_GET_DN(dev) ((struct device_node *)((dev)->sysdata))
static inline struct iommu_table *devnode_table(struct device *dev)
static inline struct iommu_table *device_to_table(struct device *hwdev)
{
struct pci_dev *pdev;
if (!dev) {
if (!hwdev) {
pdev = ppc64_isabridge_dev;
if (!pdev)
return NULL;
} else
pdev = to_pci_dev(dev);
pdev = to_pci_dev(hwdev);
return PCI_DN(PCI_GET_DN(pdev))->iommu_table;
}
...@@ -85,14 +85,15 @@ static inline unsigned long device_to_mask(struct device *hwdev)
static void *pci_iommu_alloc_coherent(struct device *hwdev, size_t size,
dma_addr_t *dma_handle, gfp_t flag)
{
return iommu_alloc_coherent(devnode_table(hwdev), size, dma_handle,
device_to_mask(hwdev), flag);
return iommu_alloc_coherent(device_to_table(hwdev), size, dma_handle,
device_to_mask(hwdev), flag,
pcibus_to_node(to_pci_dev(hwdev)->bus));
}
static void pci_iommu_free_coherent(struct device *hwdev, size_t size,
void *vaddr, dma_addr_t dma_handle)
{
iommu_free_coherent(devnode_table(hwdev), size, vaddr, dma_handle);
iommu_free_coherent(device_to_table(hwdev), size, vaddr, dma_handle);
}
/* Creates TCEs for a user provided buffer. The user buffer must be
...@@ -104,7 +105,7 @@ static void pci_iommu_free_coherent(struct device *hwdev, size_t size,
static dma_addr_t pci_iommu_map_single(struct device *hwdev, void *vaddr,
size_t size, enum dma_data_direction direction)
{
return iommu_map_single(devnode_table(hwdev), vaddr, size,
return iommu_map_single(device_to_table(hwdev), vaddr, size,
device_to_mask(hwdev), direction);
}
...@@ -112,27 +113,27 @@ static dma_addr_t pci_iommu_map_single(struct device *hwdev, void *vaddr,
static void pci_iommu_unmap_single(struct device *hwdev, dma_addr_t dma_handle,
size_t size, enum dma_data_direction direction)
{
iommu_unmap_single(devnode_table(hwdev), dma_handle, size, direction);
iommu_unmap_single(device_to_table(hwdev), dma_handle, size, direction);
}
static int pci_iommu_map_sg(struct device *pdev, struct scatterlist *sglist,
int nelems, enum dma_data_direction direction)
{
return iommu_map_sg(pdev, devnode_table(pdev), sglist,
return iommu_map_sg(pdev, device_to_table(pdev), sglist,
nelems, device_to_mask(pdev), direction);
}
static void pci_iommu_unmap_sg(struct device *pdev, struct scatterlist *sglist,
int nelems, enum dma_data_direction direction)
{
iommu_unmap_sg(devnode_table(pdev), sglist, nelems, direction);
iommu_unmap_sg(device_to_table(pdev), sglist, nelems, direction);
}
/* We support DMA to/from any memory page via the iommu */
static int pci_iommu_dma_supported(struct device *dev, u64 mask)
{
struct iommu_table *tbl = devnode_table(dev);
struct iommu_table *tbl = device_to_table(dev);
if (!tbl || tbl->it_offset > mask) {
printk(KERN_INFO "Warning: IOMMU table offset too big for device mask\n");
...@@ -147,13 +148,17 @@ static int pci_iommu_dma_supported(struct device *dev, u64 mask)
return 1;
}
struct dma_mapping_ops pci_iommu_ops = {
.alloc_coherent = pci_iommu_alloc_coherent,
.free_coherent = pci_iommu_free_coherent,
.map_single = pci_iommu_map_single,
.unmap_single = pci_iommu_unmap_single,
.map_sg = pci_iommu_map_sg,
.unmap_sg = pci_iommu_unmap_sg,
.dma_supported = pci_iommu_dma_supported,
};
void pci_iommu_init(void)
{
pci_dma_ops.alloc_coherent = pci_iommu_alloc_coherent;
pci_dma_ops.free_coherent = pci_iommu_free_coherent;
pci_dma_ops.map_single = pci_iommu_map_single;
pci_dma_ops.unmap_single = pci_iommu_unmap_single;
pci_dma_ops.map_sg = pci_iommu_map_sg;
pci_dma_ops.unmap_sg = pci_iommu_unmap_sg;
pci_dma_ops.dma_supported = pci_iommu_dma_supported;
pci_dma_ops = pci_iommu_ops;
}
...@@ -52,7 +52,7 @@ static int __init proc_ppc64_create(void)
if (!root)
return 1;
if (!machine_is(pseries) && !machine_is(cell))
if (!of_find_node_by_path("/rtas"))
return 0;
if (!proc_mkdir("rtas", root))
...
...@@ -708,6 +708,61 @@ int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
return put_user(val, (unsigned int __user *) adr);
}
int set_endian(struct task_struct *tsk, unsigned int val)
{
struct pt_regs *regs = tsk->thread.regs;
if ((val == PR_ENDIAN_LITTLE && !cpu_has_feature(CPU_FTR_REAL_LE)) ||
(val == PR_ENDIAN_PPC_LITTLE && !cpu_has_feature(CPU_FTR_PPC_LE)))
return -EINVAL;
if (regs == NULL)
return -EINVAL;
if (val == PR_ENDIAN_BIG)
regs->msr &= ~MSR_LE;
else if (val == PR_ENDIAN_LITTLE || val == PR_ENDIAN_PPC_LITTLE)
regs->msr |= MSR_LE;
else
return -EINVAL;
return 0;
}
int get_endian(struct task_struct *tsk, unsigned long adr)
{
struct pt_regs *regs = tsk->thread.regs;
unsigned int val;
if (!cpu_has_feature(CPU_FTR_PPC_LE) &&
!cpu_has_feature(CPU_FTR_REAL_LE))
return -EINVAL;
if (regs == NULL)
return -EINVAL;
if (regs->msr & MSR_LE) {
if (cpu_has_feature(CPU_FTR_REAL_LE))
val = PR_ENDIAN_LITTLE;
else
val = PR_ENDIAN_PPC_LITTLE;
} else
val = PR_ENDIAN_BIG;
return put_user(val, (unsigned int __user *)adr);
}
int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
{
tsk->thread.align_ctl = val;
return 0;
}
int get_unalign_ctl(struct task_struct *tsk, unsigned long adr)
{
return put_user(tsk->thread.align_ctl, (unsigned int __user *)adr);
}
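From user space these hooks are reached through prctl(). A minimal sketch, assuming a libc and kernel that expose PR_GET_ENDIAN/PR_SET_ENDIAN in <linux/prctl.h> (error handling trimmed):

#include <stdio.h>
#include <sys/prctl.h>
#include <linux/prctl.h>

int main(void)
{
	int mode = -1;

	/* Reaches get_endian(); the result is stored through the pointer
	 * passed as the second prctl() argument. */
	if (prctl(PR_GET_ENDIAN, (unsigned long)&mode, 0, 0, 0) == 0)
		printf("endian mode: %d (0=big, 1=little, 2=PPC pseudo little-endian)\n", mode);

	/* Reaches set_endian(); fails with EINVAL if the CPU lacks the matching
	 * CPU_FTR_REAL_LE / CPU_FTR_PPC_LE feature. */
	if (prctl(PR_SET_ENDIAN, PR_ENDIAN_BIG, 0, 0, 0) != 0)
		perror("PR_SET_ENDIAN");
	return 0;
}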
#define TRUNC_PTR(x) ((typeof(x))(((unsigned long)(x)) & 0xffffffff))
int sys_clone(unsigned long clone_flags, unsigned long usp,
...
...@@ -50,6 +50,7 @@
#include <asm/machdep.h>
#include <asm/pSeries_reconfig.h>
#include <asm/pci-bridge.h>
#include <asm/kexec.h>
#ifdef DEBUG
#define DBG(fmt...) printk(KERN_ERR fmt)
...@@ -836,6 +837,42 @@ static unsigned long __init unflatten_dt_node(unsigned long mem,
return mem;
}
static int __init early_parse_mem(char *p)
{
if (!p)
return 1;
memory_limit = PAGE_ALIGN(memparse(p, &p));
DBG("memory limit = 0x%lx\n", memory_limit);
return 0;
}
early_param("mem", early_parse_mem);
/*
* The device tree may be allocated below our memory limit, or inside the
* crash kernel region for kdump. If so, move it out now.
*/
static void move_device_tree(void)
{
unsigned long start, size;
void *p;
DBG("-> move_device_tree\n");
start = __pa(initial_boot_params);
size = initial_boot_params->totalsize;
if ((memory_limit && (start + size) > memory_limit) ||
overlaps_crashkernel(start, size)) {
p = __va(lmb_alloc_base(size, PAGE_SIZE, lmb.rmo_size));
memcpy(p, initial_boot_params, size);
initial_boot_params = (struct boot_param_header *)p;
DBG("Moved device tree to 0x%p\n", p);
}
DBG("<- move_device_tree\n");
}
/**
* unflattens the device-tree passed by the firmware, creating the
...@@ -911,7 +948,10 @@ static struct ibm_pa_feature {
{CPU_FTR_CTRL, 0, 0, 3, 0},
{CPU_FTR_NOEXECUTE, 0, 0, 6, 0},
{CPU_FTR_NODSISRALIGN, 0, 1, 1, 1},
#if 0
/* put this back once we know how to test if firmware does 64k IO */
{CPU_FTR_CI_LARGE_PAGE, 0, 1, 2, 0},
#endif
};
static void __init check_cpu_pa_features(unsigned long node)
...@@ -1070,6 +1110,7 @@ static int __init early_init_dt_scan_chosen(unsigned long node,
iommu_force_on = 1;
#endif
/* mem=x on the command line is the preferred mechanism */
lprop = of_get_flat_dt_prop(node, "linux,memory-limit", NULL);
if (lprop)
memory_limit = *lprop;
...@@ -1123,17 +1164,6 @@ static int __init early_init_dt_scan_chosen(unsigned long node,
DBG("Command line is: %s\n", cmd_line);
if (strstr(cmd_line, "mem=")) {
char *p, *q;
for (q = cmd_line; (p = strstr(q, "mem=")) != 0; ) {
q = p + 4;
if (p > cmd_line && p[-1] != ' ')
continue;
memory_limit = memparse(q, &q);
}
}
/* break now */
return 1;
}
...@@ -1237,9 +1267,17 @@ static void __init early_reserve_mem(void)
{
u64 base, size;
u64 *reserve_map;
unsigned long self_base;
unsigned long self_size;
reserve_map = (u64 *)(((unsigned long)initial_boot_params) +
initial_boot_params->off_mem_rsvmap);
/* before we do anything, lets reserve the dt blob */
self_base = __pa((unsigned long)initial_boot_params);
self_size = initial_boot_params->totalsize;
lmb_reserve(self_base, self_size);
#ifdef CONFIG_PPC32
/*
* Handle the case where we might be booting from an old kexec
...@@ -1254,6 +1292,9 @@ static void __init early_reserve_mem(void)
size_32 = *(reserve_map_32++);
if (size_32 == 0)
break;
/* skip if the reservation is for the blob */
if (base_32 == self_base && size_32 == self_size)
continue;
DBG("reserving: %x -> %x\n", base_32, size_32); DBG("reserving: %x -> %x\n", base_32, size_32);
lmb_reserve(base_32, size_32); lmb_reserve(base_32, size_32);
} }
...@@ -1265,6 +1306,9 @@ static void __init early_reserve_mem(void) ...@@ -1265,6 +1306,9 @@ static void __init early_reserve_mem(void)
size = *(reserve_map++); size = *(reserve_map++);
if (size == 0) if (size == 0)
break; break;
/* skip if the reservation is for the blob */
if (base == self_base && size == self_size)
continue;
DBG("reserving: %llx -> %llx\n", base, size); DBG("reserving: %llx -> %llx\n", base, size);
lmb_reserve(base, size); lmb_reserve(base, size);
} }
...@@ -1292,18 +1336,26 @@ void __init early_init_devtree(void *params)
lmb_init();
of_scan_flat_dt(early_init_dt_scan_root, NULL);
of_scan_flat_dt(early_init_dt_scan_memory, NULL);
lmb_enforce_memory_limit(memory_limit);
lmb_analyze();
DBG("Phys. mem: %lx\n", lmb_phys_mem_size());
/* Save command line for /proc/cmdline and then parse parameters */
strlcpy(saved_command_line, cmd_line, COMMAND_LINE_SIZE);
parse_early_param();
/* Reserve LMB regions used by kernel, initrd, dt, etc... */
lmb_reserve(PHYSICAL_START, __pa(klimit) - PHYSICAL_START);
#ifdef CONFIG_CRASH_DUMP
lmb_reserve(0, KDUMP_RESERVE_LIMIT);
#endif
reserve_kdump_trampoline();
reserve_crashkernel();
early_reserve_mem();
lmb_enforce_memory_limit(memory_limit);
lmb_analyze();
DBG("Phys. mem: %lx\n", lmb_phys_mem_size());
/* We may need to relocate the flat tree, do it now.
* FIXME .. and the initrd too? */
move_device_tree();
DBG("Scanning CPUs ...\n");
/* Retreive CPU related informations from the flat tree
...@@ -2053,29 +2105,46 @@ int prom_update_property(struct device_node *np,
return 0;
}
#ifdef CONFIG_KEXEC
/* We may have allocated the flat device tree inside the crash kernel region
* in prom_init. If so we need to move it out into regular memory. */
void kdump_move_device_tree(void)
{
unsigned long start, end;
struct boot_param_header *new;
start = __pa((unsigned long)initial_boot_params);
end = start + initial_boot_params->totalsize;
if (end < crashk_res.start || start > crashk_res.end)
return;
new = (struct boot_param_header*)
__va(lmb_alloc(initial_boot_params->totalsize, PAGE_SIZE));
memcpy(new, initial_boot_params, initial_boot_params->totalsize);
initial_boot_params = new;
DBG("Flat device tree blob moved to %p\n", initial_boot_params);
/* XXX should we unreserve the old DT? */
}
#endif /* CONFIG_KEXEC */
/* Find the device node for a given logical cpu number, also returns the cpu
* local thread number (index in ibm,interrupt-server#s) if relevant and
* asked for (non NULL)
*/
struct device_node *of_get_cpu_node(int cpu, unsigned int *thread)
{
int hardid;
struct device_node *np;
hardid = get_hard_smp_processor_id(cpu);
for_each_node_by_type(np, "cpu") {
u32 *intserv;
unsigned int plen, t;
/* Check for ibm,ppc-interrupt-server#s. If it doesn't exist
* fallback to "reg" property and assume no threads
*/
intserv = (u32 *)get_property(np, "ibm,ppc-interrupt-server#s",
&plen);
if (intserv == NULL) {
u32 *reg = (u32 *)get_property(np, "reg", NULL);
if (reg == NULL)
continue;
if (*reg == hardid) {
if (thread)
*thread = 0;
return np;
}
} else {
plen /= sizeof(u32);
for (t = 0; t < plen; t++) {
if (hardid == intserv[t]) {
if (thread)
*thread = t;
return np;
}
}
}
}
return NULL;
}
...@@ -194,19 +194,12 @@ static int __initdata of_platform;
static char __initdata prom_cmd_line[COMMAND_LINE_SIZE];
static unsigned long __initdata prom_memory_limit;
static unsigned long __initdata alloc_top;
static unsigned long __initdata alloc_top_high;
static unsigned long __initdata alloc_bottom;
static unsigned long __initdata rmo_top;
static unsigned long __initdata ram_top;
#ifdef CONFIG_KEXEC
static unsigned long __initdata prom_crashk_base;
static unsigned long __initdata prom_crashk_size;
#endif
static struct mem_map_entry __initdata mem_reserve_map[MEM_RESERVE_MAP_SIZE];
static int __initdata mem_reserve_cnt;
...@@ -574,7 +567,7 @@ static void __init early_cmdline_parse(void)
if ((long)_prom->chosen > 0)
l = prom_getprop(_prom->chosen, "bootargs", p, COMMAND_LINE_SIZE-1);
#ifdef CONFIG_CMDLINE
if (l == 0) /* dbl check */
if (l <= 0 || p[0] == '\0') /* dbl check */
strlcpy(RELOC(prom_cmd_line),
RELOC(CONFIG_CMDLINE), sizeof(prom_cmd_line));
#endif /* CONFIG_CMDLINE */
...@@ -593,45 +586,6 @@ static void __init early_cmdline_parse(void)
RELOC(iommu_force_on) = 1;
}
#endif
opt = strstr(RELOC(prom_cmd_line), RELOC("mem="));
if (opt) {
opt += 4;
RELOC(prom_memory_limit) = prom_memparse(opt, (const char **)&opt);
#ifdef CONFIG_PPC64
/* Align to 16 MB == size of ppc64 large page */
RELOC(prom_memory_limit) = ALIGN(RELOC(prom_memory_limit), 0x1000000);
#endif
}
#ifdef CONFIG_KEXEC
/*
* crashkernel=size@addr specifies the location to reserve for
* crash kernel.
*/
opt = strstr(RELOC(prom_cmd_line), RELOC("crashkernel="));
if (opt) {
opt += 12;
RELOC(prom_crashk_size) =
prom_memparse(opt, (const char **)&opt);
if (ALIGN(RELOC(prom_crashk_size), 0x1000000) !=
RELOC(prom_crashk_size)) {
prom_printf("Warning: crashkernel size is not "
"aligned to 16MB\n");
}
/*
* At present, the crash kernel always run at 32MB.
* Just ignore whatever user passed.
*/
RELOC(prom_crashk_base) = 0x2000000;
if (*opt == '@') {
prom_printf("Warning: PPC64 kdump kernel always runs "
"at 32 MB\n");
}
}
#endif
}
#ifdef CONFIG_PPC_PSERIES
...@@ -1115,29 +1069,6 @@ static void __init prom_init_mem(void)
RELOC(alloc_bottom) = PAGE_ALIGN(RELOC(prom_initrd_end));
}
/*
* If prom_memory_limit is set we reduce the upper limits *except* for
* alloc_top_high. This must be the real top of RAM so we can put
* TCE's up there.
*/
RELOC(alloc_top_high) = RELOC(ram_top);
if (RELOC(prom_memory_limit)) {
if (RELOC(prom_memory_limit) <= RELOC(alloc_bottom)) {
prom_printf("Ignoring mem=%x <= alloc_bottom.\n",
RELOC(prom_memory_limit));
RELOC(prom_memory_limit) = 0;
} else if (RELOC(prom_memory_limit) >= RELOC(ram_top)) {
prom_printf("Ignoring mem=%x >= ram_top.\n",
RELOC(prom_memory_limit));
RELOC(prom_memory_limit) = 0;
} else {
RELOC(ram_top) = RELOC(prom_memory_limit);
RELOC(rmo_top) = min(RELOC(rmo_top), RELOC(prom_memory_limit));
}
}
/*
* Setup our top alloc point, that is top of RMO or top of
* segment 0 when running non-LPAR.
...@@ -1150,20 +1081,14 @@ static void __init prom_init_mem(void)
RELOC(rmo_top) = RELOC(ram_top);
RELOC(rmo_top) = min(0x30000000ul, RELOC(rmo_top));
RELOC(alloc_top) = RELOC(rmo_top);
RELOC(alloc_top_high) = RELOC(ram_top);
prom_printf("memory layout at init:\n");
prom_printf(" memory_limit : %x (16 MB aligned)\n", RELOC(prom_memory_limit));
prom_printf(" alloc_bottom : %x\n", RELOC(alloc_bottom));
prom_printf(" alloc_top : %x\n", RELOC(alloc_top));
prom_printf(" alloc_top_hi : %x\n", RELOC(alloc_top_high));
prom_printf(" rmo_top : %x\n", RELOC(rmo_top));
prom_printf(" ram_top : %x\n", RELOC(ram_top));
#ifdef CONFIG_KEXEC
if (RELOC(prom_crashk_base)) {
prom_printf(" crashk_base : %x\n", RELOC(prom_crashk_base));
prom_printf(" crashk_size : %x\n", RELOC(prom_crashk_size));
}
#endif
}
...@@ -1349,16 +1274,10 @@ static void __init prom_initialize_tce_table(void)
reserve_mem(local_alloc_bottom, local_alloc_top - local_alloc_bottom);
if (RELOC(prom_memory_limit)) {
/*
* We align the start to a 16MB boundary so we can map
* the TCE area using large pages if possible.
* The end should be the top of RAM so no need to align it.
*/
RELOC(prom_tce_alloc_start) = _ALIGN_DOWN(local_alloc_bottom,
0x1000000);
RELOC(prom_tce_alloc_end) = local_alloc_top;
}
/* These are only really needed if there is a memory limit in
* effect, but we don't know so export them always. */
RELOC(prom_tce_alloc_start) = local_alloc_bottom;
RELOC(prom_tce_alloc_end) = local_alloc_top;
/* Flag the first invalid entry */
prom_debug("ending prom_initialize_tce_table\n");
...@@ -2041,11 +1960,7 @@ static void __init flatten_device_tree(void)
/* Version 16 is not backward compatible */
hdr->last_comp_version = 0x10;
/* Reserve the whole thing and copy the reserve map in, we
* also bump mem_reserve_cnt to cause further reservations to
* fail since it's too late.
*/
reserve_mem(RELOC(dt_header_start), hdr->totalsize);
/* Copy the reserve map in */
memcpy(rsvmap, RELOC(mem_reserve_map), sizeof(mem_reserve_map));
#ifdef DEBUG_PROM
...@@ -2058,6 +1973,9 @@ static void __init flatten_device_tree(void)
RELOC(mem_reserve_map)[i].size);
}
#endif
/* Bump mem_reserve_cnt to cause further reservations to fail
* since it's too late.
*/
RELOC(mem_reserve_cnt) = MEM_RESERVE_MAP_SIZE;
prom_printf("Device tree strings 0x%x -> 0x%x\n",
...@@ -2280,10 +2198,6 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
*/
prom_init_mem();
#ifdef CONFIG_KEXEC
if (RELOC(prom_crashk_base))
reserve_mem(RELOC(prom_crashk_base), RELOC(prom_crashk_size));
#endif
/*
* Determine which cpu is actually running right _now_
*/
...@@ -2317,10 +2231,6 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
/*
* Fill in some infos for use by the kernel later on
*/
if (RELOC(prom_memory_limit))
prom_setprop(_prom->chosen, "/chosen", "linux,memory-limit",
&RELOC(prom_memory_limit),
sizeof(prom_memory_limit));
#ifdef CONFIG_PPC64
if (RELOC(ppc64_iommu_off))
prom_setprop(_prom->chosen, "/chosen", "linux,iommu-off",
...@@ -2340,16 +2250,6 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
}
#endif
#ifdef CONFIG_KEXEC
if (RELOC(prom_crashk_base)) {
prom_setprop(_prom->chosen, "/chosen", "linux,crashkernel-base",
PTRRELOC(&prom_crashk_base),
sizeof(RELOC(prom_crashk_base)));
prom_setprop(_prom->chosen, "/chosen", "linux,crashkernel-size",
PTRRELOC(&prom_crashk_size),
sizeof(RELOC(prom_crashk_size)));
}
#endif
/*
* Fixup any known bugs in the device-tree
*/
...
...@@ -548,3 +548,28 @@ int of_pci_address_to_resource(struct device_node *dev, int bar,
return __of_address_to_resource(dev, addrp, size, flags, r);
}
EXPORT_SYMBOL_GPL(of_pci_address_to_resource);
void of_parse_dma_window(struct device_node *dn, unsigned char *dma_window_prop,
unsigned long *busno, unsigned long *phys, unsigned long *size)
{
u32 *dma_window, cells;
unsigned char *prop;
dma_window = (u32 *)dma_window_prop;
/* busno is always one cell */
*busno = *(dma_window++);
prop = get_property(dn, "ibm,#dma-address-cells", NULL);
if (!prop)
prop = get_property(dn, "#address-cells", NULL);
cells = prop ? *(u32 *)prop : prom_n_addr_cells(dn);
*phys = of_read_addr(dma_window, cells);
dma_window += cells;
prop = get_property(dn, "ibm,#dma-size-cells", NULL);
cells = prop ? *(u32 *)prop : prom_n_size_cells(dn);
*size = of_read_addr(dma_window, cells);
}
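A hedged sketch of how a caller might consume the new helper; the "ibm,dma-window" property name follows pSeries convention and, like the function name here, is an assumption rather than part of this hunk.

static void example_read_dma_window(struct device_node *dn)
{
	unsigned char *dma_window;
	unsigned long busno, phys, size;

	dma_window = get_property(dn, "ibm,dma-window", NULL);
	if (!dma_window)
		return;

	of_parse_dma_window(dn, dma_window, &busno, &phys, &size);
	/* busno: PCI bus number, phys: window start, size: window length */
}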
...@@ -404,7 +404,6 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
ret = ptrace_detach(child, data);
break;
#ifdef CONFIG_PPC64
case PPC_PTRACE_GETREGS: { /* Get GPRs 0 - 31. */
int i;
unsigned long *reg = &((unsigned long *)child->thread.regs)[0];
...@@ -468,7 +467,6 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
}
break;
}
#endif /* CONFIG_PPC64 */
#ifdef CONFIG_ALTIVEC
case PTRACE_GETVRREGS:
...
...@@ -313,7 +313,9 @@ unsigned long __init find_and_init_phbs(void)
for (node = of_get_next_child(root, NULL);
node != NULL;
node = of_get_next_child(root, node)) {
if (node->type == NULL || strcmp(node->type, "pci") != 0)
if (node->type == NULL || (strcmp(node->type, "pci") != 0 &&
strcmp(node->type, "pciex") != 0))
continue;
phb = pcibios_alloc_controller(node);
...
...@@ -443,6 +443,7 @@ void __init smp_setup_cpu_maps(void)
}
#endif /* CONFIG_SMP */
int __initdata do_early_xmon;
#ifdef CONFIG_XMON
static int __init early_xmon(char *p)
{
...@@ -456,7 +457,7 @@ static int __init early_xmon(char *p)
return 0;
}
xmon_init(1);
debugger(NULL);
do_early_xmon = 1;
return 0;
}
...@@ -524,3 +525,20 @@ int check_legacy_ioport(unsigned long base_port)
return ppc_md.check_legacy_ioport(base_port);
}
EXPORT_SYMBOL(check_legacy_ioport);
static int ppc_panic_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
ppc_md.panic(ptr); /* May not return */
return NOTIFY_DONE;
}
static struct notifier_block ppc_panic_block = {
.notifier_call = ppc_panic_event,
.priority = INT_MIN /* may not return; must be done last */
};
void __init setup_panic(void)
{
atomic_notifier_chain_register(&panic_notifier_list, &ppc_panic_block);
}
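setup_panic() relies on the generic panic notifier chain; any other code can hook the same chain in the same way. A minimal sketch with made-up names (not part of this patch):

#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/init.h>

static int example_panic_event(struct notifier_block *nb,
		unsigned long event, void *ptr)
{
	/* ptr is the message string passed to panic() */
	printk(KERN_EMERG "example panic hook: %s\n", (char *)ptr);
	return NOTIFY_DONE;
}

static struct notifier_block example_panic_block = {
	.notifier_call = example_panic_event,
};

static int __init example_panic_init(void)
{
	atomic_notifier_chain_register(&panic_notifier_list,
				       &example_panic_block);
	return 0;
}
device_initcall(example_panic_init);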