Commit 45c091bb authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc

* git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc: (139 commits)
  [POWERPC] re-enable OProfile for iSeries, using timer interrupt
  [POWERPC] support ibm,extended-*-frequency properties
  [POWERPC] Extra sanity check in EEH code
  [POWERPC] Dont look for class-code in pci children
  [POWERPC] Fix mdelay badness on shared processor partitions
  [POWERPC] disable floating point exceptions for init
  [POWERPC] Unify ppc syscall tables
  [POWERPC] mpic: add support for serial mode interrupts
  [POWERPC] pseries: Print PCI slot location code on failure
  [POWERPC] spufs: one more fix for 64k pages
  [POWERPC] spufs: fail spu_create with invalid flags
  [POWERPC] spufs: clear class2 interrupt status before wakeup
  [POWERPC] spufs: fix Makefile for "make clean"
  [POWERPC] spufs: remove stop_code from struct spu
  [POWERPC] spufs: fix spu irq affinity setting
  [POWERPC] spufs: further abstract priv1 register access
  [POWERPC] spufs: split the Cell BE support into generic and platform dependant parts
  [POWERPC] spufs: dont try to access SPE channel 1 count
  [POWERPC] spufs: use kzalloc in create_spu
  [POWERPC] spufs: fix initial state of wbox file
  ...

Manually resolved conflicts in:
	drivers/net/phy/Makefile
	include/asm-powerpc/spu.h
parents d588fcbe 2191fe3e
......@@ -45,6 +45,10 @@ config GENERIC_CALIBRATE_DELAY
bool
default y
config GENERIC_FIND_NEXT_BIT
bool
default y
config PPC
bool
default y
......@@ -137,6 +141,15 @@ config PPC_85xx
select FSL_SOC
select 85xx
config PPC_86xx
bool "Freescale 86xx"
select 6xx
select FSL_SOC
select PPC_FPU
select ALTIVEC
help
The Freescale E600 SoCs have 74xx cores.
config 40x
bool "AMCC 40x"
......@@ -336,7 +349,7 @@ endchoice
config PPC_PSERIES
depends on PPC_MULTIPLATFORM && PPC64
bool " IBM pSeries & new (POWER5-based) iSeries"
bool "IBM pSeries & new (POWER5-based) iSeries"
select PPC_I8259
select PPC_RTAS
select RTAS_ERROR_LOGGING
......@@ -344,7 +357,7 @@ config PPC_PSERIES
default y
config PPC_CHRP
bool " Common Hardware Reference Platform (CHRP) based machines"
bool "Common Hardware Reference Platform (CHRP) based machines"
depends on PPC_MULTIPLATFORM && PPC32
select PPC_I8259
select PPC_INDIRECT_PCI
......@@ -354,7 +367,7 @@ config PPC_CHRP
default y
config PPC_PMAC
bool " Apple PowerMac based machines"
bool "Apple PowerMac based machines"
depends on PPC_MULTIPLATFORM
select PPC_INDIRECT_PCI if PPC32
select PPC_MPC106 if PPC32
......@@ -370,7 +383,7 @@ config PPC_PMAC64
default y
config PPC_PREP
bool " PowerPC Reference Platform (PReP) based machines"
bool "PowerPC Reference Platform (PReP) based machines"
depends on PPC_MULTIPLATFORM && PPC32 && BROKEN
select PPC_I8259
select PPC_INDIRECT_PCI
......@@ -379,7 +392,7 @@ config PPC_PREP
config PPC_MAPLE
depends on PPC_MULTIPLATFORM && PPC64
bool " Maple 970FX Evaluation Board"
bool "Maple 970FX Evaluation Board"
select U3_DART
select MPIC_BROKEN_U3
select GENERIC_TBSYNC
......@@ -391,8 +404,18 @@ config PPC_MAPLE
For more information, refer to <http://www.970eval.com>
config PPC_CELL
bool " Cell Broadband Processor Architecture"
bool
default n
config PPC_CELL_NATIVE
bool
select PPC_CELL
default n
config PPC_IBM_CELL_BLADE
bool " IBM Cell Blade"
depends on PPC_MULTIPLATFORM && PPC64
select PPC_CELL_NATIVE
select PPC_RTAS
select MMIO_NVRAM
select PPC_UDBG_16550
......@@ -439,11 +462,6 @@ config MPIC_BROKEN_U3
depends on PPC_MAPLE
default y
config CELL_IIC
depends on PPC_CELL
bool
default y
config IBMVIO
depends on PPC_PSERIES || PPC_ISERIES
bool
......@@ -545,6 +563,7 @@ source arch/powerpc/platforms/embedded6xx/Kconfig
source arch/powerpc/platforms/4xx/Kconfig
source arch/powerpc/platforms/83xx/Kconfig
source arch/powerpc/platforms/85xx/Kconfig
source arch/powerpc/platforms/86xx/Kconfig
source arch/powerpc/platforms/8xx/Kconfig
source arch/powerpc/platforms/cell/Kconfig
......@@ -776,6 +795,7 @@ config GENERIC_ISA_DMA
config PPC_I8259
bool
default y if MPC8641_HPCN
default n
config PPC_INDIRECT_PCI
......@@ -798,8 +818,8 @@ config MCA
bool
config PCI
bool "PCI support" if 40x || CPM2 || PPC_83xx || PPC_85xx || PPC_MPC52xx || (EMBEDDED && PPC_ISERIES)
default y if !40x && !CPM2 && !8xx && !APUS && !PPC_83xx && !PPC_85xx
bool "PCI support" if 40x || CPM2 || PPC_83xx || PPC_85xx || PPC_86xx || PPC_MPC52xx || (EMBEDDED && PPC_ISERIES)
default y if !40x && !CPM2 && !8xx && !APUS && !PPC_83xx && !PPC_85xx && !PPC_86xx
default PCI_PERMEDIA if !4xx && !CPM2 && !8xx && APUS
default PCI_QSPAN if !4xx && !CPM2 && 8xx
help
......@@ -827,12 +847,12 @@ config PCI_8260
default y
config 8260_PCI9
bool " Enable workaround for MPC826x erratum PCI 9"
bool "Enable workaround for MPC826x erratum PCI 9"
depends on PCI_8260 && !ADS8272
default y
choice
prompt " IDMA channel for PCI 9 workaround"
prompt "IDMA channel for PCI 9 workaround"
depends on 8260_PCI9
config 8260_PCI9_IDMA1
......@@ -849,6 +869,8 @@ config 8260_PCI9_IDMA4
endchoice
source "drivers/pci/pcie/Kconfig"
source "drivers/pci/Kconfig"
source "drivers/pcmcia/Kconfig"
......
......@@ -110,13 +110,16 @@ config SERIAL_TEXT_DEBUG
depends on 4xx || LOPEC || MV64X60 || PPLUS || PRPMC800 || \
PPC_GEN550 || PPC_MPC52xx
config PPC_EARLY_DEBUG
bool "Early debugging (dangerous)"
choice
prompt "Early debugging (dangerous)"
bool
optional
prompt "Early debugging console"
depends on PPC_EARLY_DEBUG
help
Enable early debugging. Careful, if you enable debugging for the
wrong type of machine your kernel _will not boot_.
Use the selected console for early debugging. Careful, if you
enable debugging for the wrong type of machine your kernel
_will not boot_.
config PPC_EARLY_DEBUG_LPAR
bool "LPAR HV Console"
......
......@@ -108,7 +108,6 @@ ifeq ($(CONFIG_6xx),y)
CFLAGS += -mcpu=powerpc
endif
cpu-as-$(CONFIG_PPC64BRIDGE) += -Wa,-mppc64bridge
cpu-as-$(CONFIG_4xx) += -Wa,-m405
cpu-as-$(CONFIG_6xx) += -Wa,-maltivec
cpu-as-$(CONFIG_POWER4) += -Wa,-maltivec
......
......@@ -33,6 +33,14 @@ extern char _vmlinux_end[];
extern char _initrd_start[];
extern char _initrd_end[];
/* A buffer that may be edited by tools operating on a zImage binary so as to
* edit the command line passed to vmlinux (by setting /chosen/bootargs).
* The buffer is put in its own section so that tools may locate it more easily.
*/
static char builtin_cmdline[512]
__attribute__((section("__builtin_cmdline")));
struct addr_range {
unsigned long addr;
unsigned long size;
......@@ -204,6 +212,23 @@ static int is_elf32(void *hdr)
return 1;
}
void export_cmdline(void* chosen_handle)
{
int len;
char cmdline[2] = { 0, 0 };
if (builtin_cmdline[0] == 0)
return;
len = getprop(chosen_handle, "bootargs", cmdline, sizeof(cmdline));
if (len > 0 && cmdline[0] != 0)
return;
setprop(chosen_handle, "bootargs", builtin_cmdline,
strlen(builtin_cmdline) + 1);
}
void start(unsigned long a1, unsigned long a2, void *promptr, void *sp)
{
int len;
......@@ -289,6 +314,8 @@ void start(unsigned long a1, unsigned long a2, void *promptr, void *sp)
memmove((void *)vmlinux.addr,(void *)vmlinuz.addr,vmlinuz.size);
}
export_cmdline(chosen_handle);
/* Skip over the ELF header */
#ifdef DEBUG
printf("... skipping 0x%lx bytes of ELF header\n\r",
......
......@@ -31,4 +31,11 @@ static inline int getprop(void *phandle, const char *name,
return call_prom("getprop", 4, 1, phandle, name, buf, buflen);
}
static inline int setprop(void *phandle, const char *name,
void *buf, int buflen)
{
return call_prom("setprop", 4, 1, phandle, name, buf, buflen);
}
#endif /* _PPC_BOOT_PROM_H_ */
#
# Automatically generated make config: don't edit
# Linux kernel version: 2.6.16
# Thu Mar 23 20:48:09 2006
# Linux kernel version: 2.6.17
# Mon Jun 19 17:23:03 2006
#
CONFIG_PPC64=y
CONFIG_64BIT=y
......@@ -11,6 +11,7 @@ CONFIG_GENERIC_HARDIRQS=y
CONFIG_RWSEM_XCHGADD_ALGORITHM=y
CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_CALIBRATE_DELAY=y
CONFIG_GENERIC_FIND_NEXT_BIT=y
CONFIG_PPC=y
CONFIG_EARLY_PRINTK=y
CONFIG_COMPAT=y
......@@ -55,7 +56,7 @@ CONFIG_SYSCTL=y
# CONFIG_AUDIT is not set
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
# CONFIG_CPUSETS is not set
CONFIG_CPUSETS=y
# CONFIG_RELAY is not set
CONFIG_INITRAMFS_SOURCE=""
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
......@@ -116,13 +117,15 @@ CONFIG_PPC_MULTIPLATFORM=y
# CONFIG_PPC_PMAC is not set
# CONFIG_PPC_MAPLE is not set
CONFIG_PPC_CELL=y
CONFIG_PPC_CELL_NATIVE=y
CONFIG_PPC_IBM_CELL_BLADE=y
CONFIG_PPC_SYSTEMSIM=y
# CONFIG_U3_DART is not set
CONFIG_PPC_RTAS=y
# CONFIG_RTAS_ERROR_LOGGING is not set
CONFIG_RTAS_PROC=y
CONFIG_RTAS_FLASH=y
CONFIG_MMIO_NVRAM=y
CONFIG_CELL_IIC=y
# CONFIG_PPC_MPC106 is not set
# CONFIG_PPC_970_NAP is not set
# CONFIG_CPU_FREQ is not set
......@@ -132,7 +135,9 @@ CONFIG_CELL_IIC=y
# Cell Broadband Engine options
#
CONFIG_SPU_FS=m
CONFIG_SPU_BASE=y
CONFIG_SPUFS_MMAP=y
CONFIG_CBE_RAS=y
#
# Kernel options
......@@ -152,20 +157,24 @@ CONFIG_FORCE_MAX_ZONEORDER=13
CONFIG_KEXEC=y
# CONFIG_CRASH_DUMP is not set
CONFIG_IRQ_ALL_CPUS=y
# CONFIG_NUMA is not set
CONFIG_NUMA=y
CONFIG_NODES_SHIFT=4
CONFIG_ARCH_SELECT_MEMORY_MODEL=y
CONFIG_ARCH_FLATMEM_ENABLE=y
CONFIG_ARCH_SPARSEMEM_ENABLE=y
CONFIG_SELECT_MEMORY_MODEL=y
# CONFIG_FLATMEM_MANUAL is not set
# CONFIG_DISCONTIGMEM_MANUAL is not set
CONFIG_SPARSEMEM_MANUAL=y
CONFIG_SPARSEMEM=y
CONFIG_NEED_MULTIPLE_NODES=y
CONFIG_HAVE_MEMORY_PRESENT=y
# CONFIG_SPARSEMEM_STATIC is not set
CONFIG_SPARSEMEM_EXTREME=y
# CONFIG_MEMORY_HOTPLUG is not set
CONFIG_MEMORY_HOTPLUG=y
CONFIG_SPLIT_PTLOCK_CPUS=4
CONFIG_MIGRATION=y
CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID=y
CONFIG_ARCH_MEMORY_PROBE=y
# CONFIG_PPC_64K_PAGES is not set
CONFIG_SCHED_SMT=y
CONFIG_PROC_DEVICETREE=y
......@@ -182,6 +191,7 @@ CONFIG_GENERIC_ISA_DMA=y
# CONFIG_PPC_INDIRECT_PCI is not set
CONFIG_PCI=y
CONFIG_PCI_DOMAINS=y
CONFIG_PCIEPORTBUS=y
# CONFIG_PCI_DEBUG is not set
#
......@@ -476,7 +486,7 @@ CONFIG_DM_MULTIPATH=m
#
CONFIG_NETDEVICES=y
# CONFIG_DUMMY is not set
# CONFIG_BONDING is not set
CONFIG_BONDING=y
# CONFIG_EQUALIZER is not set
# CONFIG_TUN is not set
......@@ -624,6 +634,7 @@ CONFIG_SERIAL_NONSTANDARD=y
# CONFIG_N_HDLC is not set
# CONFIG_SPECIALIX is not set
# CONFIG_SX is not set
# CONFIG_RIO is not set
# CONFIG_STALDRV is not set
#
......@@ -766,6 +777,7 @@ CONFIG_I2C_ALGOBIT=y
# Multimedia devices
#
# CONFIG_VIDEO_DEV is not set
CONFIG_VIDEO_V4L2=y
#
# Digital Video Broadcasting Devices
......@@ -1054,11 +1066,7 @@ CONFIG_DEBUGGER=y
# CONFIG_XMON is not set
CONFIG_IRQSTACKS=y
# CONFIG_BOOTX_TEXT is not set
# CONFIG_PPC_EARLY_DEBUG_LPAR is not set
# CONFIG_PPC_EARLY_DEBUG_G5 is not set
# CONFIG_PPC_EARLY_DEBUG_RTAS is not set
# CONFIG_PPC_EARLY_DEBUG_MAPLE is not set
# CONFIG_PPC_EARLY_DEBUG_ISERIES is not set
# CONFIG_PPC_EARLY_DEBUG is not set
#
# Security options
......
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
#
# Automatically generated make config: don't edit
# Linux kernel version: 2.6.17-rc1
# Wed Apr 19 11:48:00 2006
# Linux kernel version: 2.6.17-rc4
# Sun May 28 07:26:56 2006
#
CONFIG_PPC64=y
CONFIG_64BIT=y
......@@ -11,6 +11,7 @@ CONFIG_GENERIC_HARDIRQS=y
CONFIG_RWSEM_XCHGADD_ALGORITHM=y
CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_CALIBRATE_DELAY=y
CONFIG_GENERIC_FIND_NEXT_BIT=y
CONFIG_PPC=y
CONFIG_EARLY_PRINTK=y
CONFIG_COMPAT=y
......@@ -126,8 +127,9 @@ CONFIG_RTAS_PROC=y
CONFIG_RTAS_FLASH=m
# CONFIG_MMIO_NVRAM is not set
CONFIG_IBMVIO=y
# CONFIG_IBMEBUS is not set
CONFIG_IBMEBUS=y
# CONFIG_PPC_MPC106 is not set
# CONFIG_PPC_970_NAP is not set
# CONFIG_CPU_FREQ is not set
# CONFIG_WANT_EARLY_SERIAL is not set
......@@ -143,7 +145,7 @@ CONFIG_PREEMPT_NONE=y
# CONFIG_PREEMPT is not set
# CONFIG_PREEMPT_BKL is not set
CONFIG_BINFMT_ELF=y
# CONFIG_BINFMT_MISC is not set
CONFIG_BINFMT_MISC=m
CONFIG_FORCE_MAX_ZONEORDER=13
CONFIG_IOMMU_VMERGE=y
CONFIG_HOTPLUG_CPU=y
......@@ -155,6 +157,7 @@ CONFIG_EEH=y
CONFIG_SCANLOG=m
CONFIG_LPARCFG=y
CONFIG_NUMA=y
CONFIG_NODES_SHIFT=4
CONFIG_ARCH_SELECT_MEMORY_MODEL=y
CONFIG_ARCH_SPARSEMEM_ENABLE=y
CONFIG_ARCH_SPARSEMEM_DEFAULT=y
......@@ -467,7 +470,7 @@ CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_SPI_ATTRS=y
CONFIG_SCSI_FC_ATTRS=y
CONFIG_SCSI_ISCSI_ATTRS=m
# CONFIG_SCSI_SAS_ATTRS is not set
CONFIG_SCSI_SAS_ATTRS=m
#
# SCSI low-level drivers
......@@ -499,13 +502,18 @@ CONFIG_SCSI_SYM53C8XX_2=y
CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=0
CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16
CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64
# CONFIG_SCSI_SYM53C8XX_IOMAPPED is not set
CONFIG_SCSI_SYM53C8XX_MMIO=y
CONFIG_SCSI_IPR=y
CONFIG_SCSI_IPR_TRACE=y
CONFIG_SCSI_IPR_DUMP=y
# CONFIG_SCSI_QLOGIC_FC is not set
# CONFIG_SCSI_QLOGIC_1280 is not set
# CONFIG_SCSI_QLA_FC is not set
CONFIG_SCSI_QLA_FC=m
CONFIG_SCSI_QLA2XXX_EMBEDDED_FIRMWARE=y
CONFIG_SCSI_QLA21XX=m
CONFIG_SCSI_QLA22XX=m
CONFIG_SCSI_QLA2300=m
CONFIG_SCSI_QLA2322=m
CONFIG_SCSI_QLA24XX=m
CONFIG_SCSI_LPFC=m
# CONFIG_SCSI_DC395x is not set
# CONFIG_SCSI_DC390T is not set
......@@ -521,7 +529,7 @@ CONFIG_MD_RAID0=y
CONFIG_MD_RAID1=y
CONFIG_MD_RAID10=m
CONFIG_MD_RAID5=y
# CONFIG_MD_RAID5_RESHAPE is not set
CONFIG_MD_RAID5_RESHAPE=y
CONFIG_MD_RAID6=m
CONFIG_MD_MULTIPATH=m
CONFIG_MD_FAULTY=m
......@@ -764,7 +772,7 @@ CONFIG_SERIAL_8250_RUNTIME_UARTS=4
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
CONFIG_SERIAL_ICOM=m
# CONFIG_SERIAL_JSM is not set
CONFIG_SERIAL_JSM=m
CONFIG_UNIX98_PTYS=y
CONFIG_LEGACY_PTYS=y
CONFIG_LEGACY_PTY_COUNT=256
......@@ -773,7 +781,7 @@ CONFIG_LEGACY_PTY_COUNT=256
# CONFIG_TIPAR is not set
CONFIG_HVC_DRIVER=y
CONFIG_HVC_CONSOLE=y
# CONFIG_HVC_RTAS is not set
CONFIG_HVC_RTAS=y
CONFIG_HVCS=m
#
......@@ -1031,9 +1039,7 @@ CONFIG_USB_HIDDEV=y
# CONFIG_USB_ACECAD is not set
# CONFIG_USB_KBTAB is not set
# CONFIG_USB_POWERMATE is not set
# CONFIG_USB_MTOUCH is not set
# CONFIG_USB_ITMTOUCH is not set
# CONFIG_USB_EGALAX is not set
# CONFIG_USB_TOUCHSCREEN is not set
# CONFIG_USB_YEALINK is not set
# CONFIG_USB_XPAD is not set
# CONFIG_USB_ATI_REMOTE is not set
......@@ -1104,17 +1110,26 @@ CONFIG_USB_MON=y
#
# CONFIG_NEW_LEDS is not set
#
# LED drivers
#
#
# LED Triggers
#
#
# InfiniBand support
#
CONFIG_INFINIBAND=m
# CONFIG_INFINIBAND_USER_MAD is not set
# CONFIG_INFINIBAND_USER_ACCESS is not set
CONFIG_INFINIBAND_USER_MAD=m
CONFIG_INFINIBAND_USER_ACCESS=m
CONFIG_INFINIBAND_MTHCA=m
# CONFIG_INFINIBAND_MTHCA_DEBUG is not set
CONFIG_INFINIBAND_MTHCA_DEBUG=y
CONFIG_INFINIBAND_IPOIB=m
# CONFIG_INFINIBAND_IPOIB_DEBUG is not set
# CONFIG_INFINIBAND_SRP is not set
CONFIG_INFINIBAND_IPOIB_DEBUG=y
# CONFIG_INFINIBAND_IPOIB_DEBUG_DATA is not set
CONFIG_INFINIBAND_SRP=m
#
# EDAC - error detection and reporting (RAS) (EXPERIMENTAL)
......@@ -1159,15 +1174,15 @@ CONFIG_XFS_EXPORT=y
CONFIG_XFS_SECURITY=y
CONFIG_XFS_POSIX_ACL=y
# CONFIG_XFS_RT is not set
# CONFIG_OCFS2_FS is not set
CONFIG_OCFS2_FS=m
# CONFIG_MINIX_FS is not set
# CONFIG_ROMFS_FS is not set
CONFIG_INOTIFY=y
# CONFIG_QUOTA is not set
CONFIG_DNOTIFY=y
CONFIG_AUTOFS_FS=m
# CONFIG_AUTOFS4_FS is not set
# CONFIG_FUSE_FS is not set
# CONFIG_AUTOFS_FS is not set
CONFIG_AUTOFS4_FS=m
CONFIG_FUSE_FS=m
#
# CD-ROM/DVD Filesystems
......@@ -1199,7 +1214,7 @@ CONFIG_TMPFS=y
CONFIG_HUGETLBFS=y
CONFIG_HUGETLB_PAGE=y
CONFIG_RAMFS=y
# CONFIG_CONFIGFS_FS is not set
CONFIG_CONFIGFS_FS=m
#
# Miscellaneous filesystems
......@@ -1317,7 +1332,7 @@ CONFIG_ZLIB_DEFLATE=m
#
CONFIG_PROFILING=y
CONFIG_OPROFILE=y
# CONFIG_KPROBES is not set
CONFIG_KPROBES=y
#
# Kernel hacking
......@@ -1329,7 +1344,7 @@ CONFIG_LOG_BUF_SHIFT=17
CONFIG_DETECT_SOFTLOCKUP=y
# CONFIG_SCHEDSTATS is not set
# CONFIG_DEBUG_SLAB is not set
CONFIG_DEBUG_MUTEXES=y
# CONFIG_DEBUG_MUTEXES is not set
# CONFIG_DEBUG_SPINLOCK is not set
# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
# CONFIG_DEBUG_KOBJECT is not set
......@@ -1339,17 +1354,13 @@ CONFIG_DEBUG_FS=y
CONFIG_FORCED_INLINING=y
# CONFIG_RCU_TORTURE_TEST is not set
CONFIG_DEBUG_STACKOVERFLOW=y
CONFIG_DEBUG_STACK_USAGE=y
# CONFIG_DEBUG_STACK_USAGE is not set
CONFIG_DEBUGGER=y
CONFIG_XMON=y
CONFIG_XMON_DEFAULT=y
CONFIG_IRQSTACKS=y
# CONFIG_BOOTX_TEXT is not set
# CONFIG_PPC_EARLY_DEBUG_LPAR is not set
# CONFIG_PPC_EARLY_DEBUG_G5 is not set
# CONFIG_PPC_EARLY_DEBUG_RTAS is not set
# CONFIG_PPC_EARLY_DEBUG_MAPLE is not set
# CONFIG_PPC_EARLY_DEBUG_ISERIES is not set
# CONFIG_PPC_EARLY_DEBUG is not set
#
# Security options
......
......@@ -35,17 +35,19 @@ struct aligninfo {
#define INVALID { 0, 0 }
#define LD 1 /* load */
#define ST 2 /* store */
#define SE 4 /* sign-extend value */
#define F 8 /* to/from fp regs */
#define U 0x10 /* update index register */
#define M 0x20 /* multiple load/store */
#define SW 0x40 /* byte swap int or ... */
#define S 0x40 /* ... single-precision fp */
#define SX 0x40 /* byte count in XER */
/* Bits in the flags field */
#define LD 0 /* load */
#define ST 1 /* store */
#define SE 2 /* sign-extend value */
#define F 4 /* to/from fp regs */
#define U 8 /* update index register */
#define M 0x10 /* multiple load/store */
#define SW 0x20 /* byte swap */
#define S 0x40 /* single-precision fp or... */
#define SX 0x40 /* ... byte count in XER */
#define HARD 0x80 /* string, stwcx. */
/* DSISR bits reported for a DCBZ instruction: */
#define DCBZ 0x5f /* 8xx/82xx dcbz faults when cache not enabled */
#define SWAP(a, b) (t = (a), (a) = (b), (b) = t)
......@@ -256,12 +258,16 @@ static int emulate_dcbz(struct pt_regs *regs, unsigned char __user *addr)
#define REG_BYTE(rp, i) *((u8 *)(rp) + (i))
#endif
#define SWIZ_PTR(p) ((unsigned char __user *)((p) ^ swiz))
static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr,
unsigned int reg, unsigned int nb,
unsigned int flags, unsigned int instr)
unsigned int flags, unsigned int instr,
unsigned long swiz)
{
unsigned long *rptr;
unsigned int nb0, i;
unsigned int nb0, i, bswiz;
unsigned long p;
/*
* We do not try to emulate 8 bytes multiple as they aren't really
......@@ -280,9 +286,12 @@ static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr,
if (nb == 0)
return 1;
} else {
if (__get_user(instr,
(unsigned int __user *)regs->nip))
unsigned long pc = regs->nip ^ (swiz & 4);
if (__get_user(instr, (unsigned int __user *)pc))
return -EFAULT;
if (swiz == 0 && (flags & SW))
instr = cpu_to_le32(instr);
nb = (instr >> 11) & 0x1f;
if (nb == 0)
nb = 32;
......@@ -300,7 +309,10 @@ static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr,
return -EFAULT; /* bad address */
rptr = &regs->gpr[reg];
if (flags & LD) {
p = (unsigned long) addr;
bswiz = (flags & SW)? 3: 0;
if (!(flags & ST)) {
/*
* This zeroes the top 4 bytes of the affected registers
* in 64-bit mode, and also zeroes out any remaining
......@@ -311,26 +323,28 @@ static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr,
memset(&regs->gpr[0], 0,
((nb0 + 3) / 4) * sizeof(unsigned long));
for (i = 0; i < nb; ++i)
if (__get_user(REG_BYTE(rptr, i), addr + i))
for (i = 0; i < nb; ++i, ++p)
if (__get_user(REG_BYTE(rptr, i ^ bswiz), SWIZ_PTR(p)))
return -EFAULT;
if (nb0 > 0) {
rptr = &regs->gpr[0];
addr += nb;
for (i = 0; i < nb0; ++i)
if (__get_user(REG_BYTE(rptr, i), addr + i))
for (i = 0; i < nb0; ++i, ++p)
if (__get_user(REG_BYTE(rptr, i ^ bswiz),
SWIZ_PTR(p)))
return -EFAULT;
}
} else {
for (i = 0; i < nb; ++i)
if (__put_user(REG_BYTE(rptr, i), addr + i))
for (i = 0; i < nb; ++i, ++p)
if (__put_user(REG_BYTE(rptr, i ^ bswiz), SWIZ_PTR(p)))
return -EFAULT;
if (nb0 > 0) {
rptr = &regs->gpr[0];
addr += nb;
for (i = 0; i < nb0; ++i)
if (__put_user(REG_BYTE(rptr, i), addr + i))
for (i = 0; i < nb0; ++i, ++p)
if (__put_user(REG_BYTE(rptr, i ^ bswiz),
SWIZ_PTR(p)))
return -EFAULT;
}
}
......@@ -352,7 +366,7 @@ int fix_alignment(struct pt_regs *regs)
unsigned int reg, areg;
unsigned int dsisr;
unsigned char __user *addr;
unsigned char __user *p;
unsigned long p, swiz;
int ret, t;
union {
u64 ll;
......@@ -380,11 +394,15 @@ int fix_alignment(struct pt_regs *regs)
* let's make one up from the instruction
*/
if (cpu_has_feature(CPU_FTR_NODSISRALIGN)) {
unsigned int real_instr;
if (unlikely(__get_user(real_instr,
(unsigned int __user *)regs->nip)))
unsigned long pc = regs->nip;
if (cpu_has_feature(CPU_FTR_PPC_LE) && (regs->msr & MSR_LE))
pc ^= 4;
if (unlikely(__get_user(instr, (unsigned int __user *)pc)))
return -EFAULT;
dsisr = make_dsisr(real_instr);
if (cpu_has_feature(CPU_FTR_REAL_LE) && (regs->msr & MSR_LE))
instr = cpu_to_le32(instr);
dsisr = make_dsisr(instr);
}
/* extract the operation and registers from the dsisr */
......@@ -397,6 +415,24 @@ int fix_alignment(struct pt_regs *regs)
nb = aligninfo[instr].len;
flags = aligninfo[instr].flags;
/* Byteswap little endian loads and stores */
swiz = 0;
if (regs->msr & MSR_LE) {
flags ^= SW;
/*
* So-called "PowerPC little endian" mode works by
* swizzling addresses rather than by actually doing
* any byte-swapping. To emulate this, we XOR each
* byte address with 7. We also byte-swap, because
* the processor's address swizzling depends on the
* operand size (it xors the address with 7 for bytes,
* 6 for halfwords, 4 for words, 0 for doublewords) but
* we will xor with 7 and load/store each byte separately.
*/
if (cpu_has_feature(CPU_FTR_PPC_LE))
swiz = 7;
}
/* DAR has the operand effective address */
addr = (unsigned char __user *)regs->dar;
......@@ -412,7 +448,8 @@ int fix_alignment(struct pt_regs *regs)
* function
*/
if (flags & M)
return emulate_multiple(regs, addr, reg, nb, flags, instr);
return emulate_multiple(regs, addr, reg, nb,
flags, instr, swiz);
/* Verify the address of the operand */
if (unlikely(user_mode(regs) &&
......@@ -431,51 +468,71 @@ int fix_alignment(struct pt_regs *regs)
/* If we are loading, get the data from user space, else
* get it from register values
*/
if (flags & LD) {
if (!(flags & ST)) {
data.ll = 0;
ret = 0;
p = addr;
p = (unsigned long) addr;
switch (nb) {
case 8:
ret |= __get_user(data.v[0], p++);
ret |= __get_user(data.v[1], p++);
ret |= __get_user(data.v[2], p++);
ret |= __get_user(data.v[3], p++);
ret |= __get_user(data.v[0], SWIZ_PTR(p++));
ret |= __get_user(data.v[1], SWIZ_PTR(p++));
ret |= __get_user(data.v[2], SWIZ_PTR(p++));
ret |= __get_user(data.v[3], SWIZ_PTR(p++));
case 4:
ret |= __get_user(data.v[4], p++);
ret |= __get_user(data.v[5], p++);
ret |= __get_user(data.v[4], SWIZ_PTR(p++));
ret |= __get_user(data.v[5], SWIZ_PTR(p++));
case 2:
ret |= __get_user(data.v[6], p++);
ret |= __get_user(data.v[7], p++);
ret |= __get_user(data.v[6], SWIZ_PTR(p++));
ret |= __get_user(data.v[7], SWIZ_PTR(p++));
if (unlikely(ret))
return -EFAULT;
}
} else if (flags & F)
} else if (flags & F) {
data.dd = current->thread.fpr[reg];
else
if (flags & S) {
/* Single-precision FP store requires conversion... */
#ifdef CONFIG_PPC_FPU
preempt_disable();
enable_kernel_fp();
cvt_df(&data.dd, (float *)&data.v[4], &current->thread);
preempt_enable();
#else
return 0;
#endif
}
} else
data.ll = regs->gpr[reg];
/* Perform other misc operations like sign extension, byteswap,
if (flags & SW) {
switch (nb) {
case 8:
SWAP(data.v[0], data.v[7]);
SWAP(data.v[1], data.v[6]);
SWAP(data.v[2], data.v[5]);
SWAP(data.v[3], data.v[4]);
break;
case 4:
SWAP(data.v[4], data.v[7]);
SWAP(data.v[5], data.v[6]);
break;
case 2:
SWAP(data.v[6], data.v[7]);
break;
}
}
/* Perform other misc operations like sign extension
* or floating point single precision conversion
*/
switch (flags & ~U) {
switch (flags & ~(U|SW)) {
case LD+SE: /* sign extend */
if ( nb == 2 )
data.ll = data.x16.low16;
else /* nb must be 4 */
data.ll = data.x32.low32;
break;
case LD+S: /* byte-swap */
case ST+S:
if (nb == 2) {
SWAP(data.v[6], data.v[7]);
} else {
SWAP(data.v[4], data.v[7]);
SWAP(data.v[5], data.v[6]);
}
break;
/* Single-precision FP load and store require conversions... */
/* Single-precision FP load requires conversion... */
case LD+F+S:
#ifdef CONFIG_PPC_FPU
preempt_disable();
......@@ -484,16 +541,6 @@ int fix_alignment(struct pt_regs *regs)
preempt_enable();
#else
return 0;
#endif
break;
case ST+F+S:
#ifdef CONFIG_PPC_FPU
preempt_disable();
enable_kernel_fp();
cvt_df(&data.dd, (float *)&data.v[4], &current->thread);
preempt_enable();
#else
return 0;
#endif
break;
}
......@@ -501,19 +548,19 @@ int fix_alignment(struct pt_regs *regs)
/* Store result to memory or update registers */
if (flags & ST) {
ret = 0;
p = addr;
p = (unsigned long) addr;
switch (nb) {
case 8:
ret |= __put_user(data.v[0], p++);
ret |= __put_user(data.v[1], p++);
ret |= __put_user(data.v[2], p++);
ret |= __put_user(data.v[3], p++);
ret |= __put_user(data.v[0], SWIZ_PTR(p++));
ret |= __put_user(data.v[1], SWIZ_PTR(p++));
ret |= __put_user(data.v[2], SWIZ_PTR(p++));
ret |= __put_user(data.v[3], SWIZ_PTR(p++));
case 4:
ret |= __put_user(data.v[4], p++);
ret |= __put_user(data.v[5], p++);
ret |= __put_user(data.v[4], SWIZ_PTR(p++));
ret |= __put_user(data.v[5], SWIZ_PTR(p++));
case 2:
ret |= __put_user(data.v[6], p++);
ret |= __put_user(data.v[7], p++);
ret |= __put_user(data.v[6], SWIZ_PTR(p++));
ret |= __put_user(data.v[7], SWIZ_PTR(p++));
}
if (unlikely(ret))
return -EFAULT;
......
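The address-swizzling comment in the alignment hunk above is easier to follow with a concrete address. A minimal userspace sketch (editor's illustration only, not part of the commit) showing which bytes the hardware and the emulator touch for an aligned word access at effective address 8 in PPC little-endian mode:

#include <stdio.h>

int main(void)
{
	unsigned long A = 8;	/* word-aligned effective address */
	unsigned int i;

	/* the CPU xors a word access with 4, so it really touches 12..15 */
	printf("hardware bytes: %lu..%lu\n", A ^ 4, (A ^ 4) + 3);

	/* the emulator instead reads byte (A + i) ^ 7 -- 15, 14, 13, 12 --
	 * and then byte-swaps (flags ^= SW), ending up with the same data */
	for (i = 0; i < 4; i++)
		printf("emulated byte %u at address %lu\n", i, (A + i) ^ 7);
	return 0;
}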
......@@ -122,9 +122,8 @@ int main(void)
DEFINE(PACASLBCACHE, offsetof(struct paca_struct, slb_cache));
DEFINE(PACASLBCACHEPTR, offsetof(struct paca_struct, slb_cache_ptr));
DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id));
#ifdef CONFIG_PPC_64K_PAGES
DEFINE(PACAPGDIR, offsetof(struct paca_struct, pgdir));
#endif
DEFINE(PACACONTEXTSLLP, offsetof(struct paca_struct, context.sllp));
DEFINE(PACAVMALLOCSLLP, offsetof(struct paca_struct, vmalloc_sllp));
#ifdef CONFIG_HUGETLB_PAGE
DEFINE(PACALOWHTLBAREAS, offsetof(struct paca_struct, context.low_htlb_areas));
DEFINE(PACAHIGHHTLBAREAS, offsetof(struct paca_struct, context.high_htlb_areas));
......
......@@ -210,9 +210,11 @@ setup_745x_specifics:
* the firmware. If any, we disable NAP capability as
* it's known to be bogus on rev 2.1 and earlier
*/
BEGIN_FTR_SECTION
mfspr r11,SPRN_L3CR
andis. r11,r11,L3CR_L3E@h
beq 1f
END_FTR_SECTION_IFSET(CPU_FTR_L3CR)
lwz r6,CPU_SPEC_FEATURES(r5)
andi. r0,r6,CPU_FTR_L3_DISABLE_NAP
beq 1f
......
......@@ -73,23 +73,6 @@ _GLOBAL(__970_cpu_preinit)
isync
blr
_GLOBAL(__setup_cpu_power4)
blr
_GLOBAL(__setup_cpu_be)
/* Set large page sizes LP=0: 16MB, LP=1: 64KB */
addi r3, 0, 0
ori r3, r3, HID6_LB
sldi r3, r3, 32
nor r3, r3, r3
mfspr r4, SPRN_HID6
and r4, r4, r3
addi r3, 0, 0x02000
sldi r3, r3, 32
or r4, r4, r3
mtspr SPRN_HID6, r4
blr
_GLOBAL(__setup_cpu_ppc970)
mfspr r0,SPRN_HID0
li r11,5 /* clear DOZE and SLEEP */
......
This diff is collapsed.
......@@ -22,6 +22,7 @@
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/types.h>
#include <asm/processor.h>
......@@ -174,6 +175,8 @@ static void crash_kexec_prepare_cpus(void)
void default_machine_crash_shutdown(struct pt_regs *regs)
{
unsigned int irq;
/*
* This function is only called after the system
* has paniced or is otherwise in a critical state.
......@@ -186,6 +189,16 @@ void default_machine_crash_shutdown(struct pt_regs *regs)
*/
local_irq_disable();
for_each_irq(irq) {
struct irq_desc *desc = irq_descp(irq);
if (desc->status & IRQ_INPROGRESS)
desc->handler->end(irq);
if (!(desc->status & IRQ_DISABLED))
desc->handler->disable(irq);
}
if (ppc_md.kexec_cpu_down)
ppc_md.kexec_cpu_down(1, 0);
......
......@@ -25,6 +25,11 @@
#define DBG(fmt...)
#endif
void reserve_kdump_trampoline(void)
{
lmb_reserve(0, KDUMP_RESERVE_LIMIT);
}
static void __init create_trampoline(unsigned long addr)
{
/* The maximum range of a single instruction branch, is the current
......@@ -39,11 +44,11 @@ static void __init create_trampoline(unsigned long addr)
create_branch(addr + 4, addr + PHYSICAL_START, 0);
}
void __init kdump_setup(void)
void __init setup_kdump_trampoline(void)
{
unsigned long i;
DBG(" -> kdump_setup()\n");
DBG(" -> setup_kdump_trampoline()\n");
for (i = KDUMP_TRAMPOLINE_START; i < KDUMP_TRAMPOLINE_END; i += 8) {
create_trampoline(i);
......@@ -52,7 +57,7 @@ void __init kdump_setup(void)
create_trampoline(__pa(system_reset_fwnmi) - PHYSICAL_START);
create_trampoline(__pa(machine_check_fwnmi) - PHYSICAL_START);
DBG(" <- kdump_setup()\n");
DBG(" <- setup_kdump_trampoline()\n");
}
#ifdef CONFIG_PROC_VMCORE
......
......@@ -57,6 +57,7 @@ system_call_common:
beq- 1f
ld r1,PACAKSAVE(r13)
1: std r10,0(r1)
crclr so
std r11,_NIP(r1)
std r12,_MSR(r1)
std r0,GPR0(r1)
......@@ -75,7 +76,6 @@ system_call_common:
std r11,GPR11(r1)
std r11,GPR12(r1)
std r9,GPR13(r1)
crclr so
mfcr r9
mflr r10
li r11,0xc01
......
......@@ -72,7 +72,7 @@ _GLOBAL(load_up_fpu)
std r12,_MSR(r1)
#endif
lfd fr0,THREAD_FPSCR(r5)
mtfsf 0xff,fr0
MTFSF_L(fr0)
REST_32FPRS(0, r5)
#ifndef CONFIG_SMP
subi r4,r5,THREAD
......@@ -127,7 +127,7 @@ _GLOBAL(giveup_fpu)
_GLOBAL(cvt_fd)
lfd 0,THREAD_FPSCR(r5) /* load up fpscr value */
mtfsf 0xff,0
MTFSF_L(0)
lfs 0,0(r3)
stfd 0,0(r4)
mffs 0
......@@ -136,7 +136,7 @@ _GLOBAL(cvt_fd)
_GLOBAL(cvt_df)
lfd 0,THREAD_FPSCR(r5) /* load up fpscr value */
mtfsf 0xff,0
MTFSF_L(0)
lfd 0,0(r3)
stfs 0,0(r4)
mffs 0
......
......@@ -973,6 +973,13 @@ __secondary_start_gemini:
b __secondary_start
#endif /* CONFIG_GEMINI */
.globl __secondary_start_mpc86xx
__secondary_start_mpc86xx:
mfspr r3, SPRN_PIR
stw r3, __secondary_hold_acknowledge@l(0)
mr r24, r3 /* cpu # */
b __secondary_start
.globl __secondary_start_pmac_0
__secondary_start_pmac_0:
/* NB the entries for cpus 0, 1, 2 must each occupy 8 bytes. */
......@@ -1088,7 +1095,12 @@ load_up_mmu:
LOAD_BAT(1,r3,r4,r5)
LOAD_BAT(2,r3,r4,r5)
LOAD_BAT(3,r3,r4,r5)
BEGIN_FTR_SECTION
LOAD_BAT(4,r3,r4,r5)
LOAD_BAT(5,r3,r4,r5)
LOAD_BAT(6,r3,r4,r5)
LOAD_BAT(7,r3,r4,r5)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_HIGH_BATS)
blr
/*
......
......@@ -316,6 +316,21 @@ label##_pSeries: \
mtspr SPRN_SPRG1,r13; /* save r13 */ \
EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common)
#define HSTD_EXCEPTION_PSERIES(n, label) \
. = n; \
.globl label##_pSeries; \
label##_pSeries: \
HMT_MEDIUM; \
mtspr SPRN_SPRG1,r20; /* save r20 */ \
mfspr r20,SPRN_HSRR0; /* copy HSRR0 to SRR0 */ \
mtspr SPRN_SRR0,r20; \
mfspr r20,SPRN_HSRR1; /* copy HSRR1 to SRR1 */ \
mtspr SPRN_SRR1,r20; \
mfspr r20,SPRN_SPRG1; /* restore r20 */ \
mtspr SPRN_SPRG1,r13; /* save r13 */ \
EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common)
#define STD_EXCEPTION_ISERIES(n, label, area) \
.globl label##_iSeries; \
label##_iSeries: \
......@@ -544,8 +559,17 @@ system_call_pSeries:
STD_EXCEPTION_PSERIES(0xf20, altivec_unavailable)
#ifdef CONFIG_CBE_RAS
HSTD_EXCEPTION_PSERIES(0x1200, cbe_system_error)
#endif /* CONFIG_CBE_RAS */
STD_EXCEPTION_PSERIES(0x1300, instruction_breakpoint)
#ifdef CONFIG_CBE_RAS
HSTD_EXCEPTION_PSERIES(0x1600, cbe_maintenance)
#endif /* CONFIG_CBE_RAS */
STD_EXCEPTION_PSERIES(0x1700, altivec_assist)
#ifdef CONFIG_CBE_RAS
HSTD_EXCEPTION_PSERIES(0x1800, cbe_thermal)
#endif /* CONFIG_CBE_RAS */
. = 0x3000
......@@ -827,6 +851,11 @@ machine_check_common:
#else
STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception)
#endif
#ifdef CONFIG_CBE_RAS
STD_EXCEPTION_COMMON(0x1200, cbe_system_error, .cbe_system_error_exception)
STD_EXCEPTION_COMMON(0x1600, cbe_maintenance, .cbe_maintenance_exception)
STD_EXCEPTION_COMMON(0x1800, cbe_thermal, .cbe_thermal_exception)
#endif /* CONFIG_CBE_RAS */
/*
* Here we have detected that the kernel stack pointer is bad.
......
......@@ -106,8 +106,6 @@ EXPORT_SYMBOL(iowrite32_rep);
void __iomem *ioport_map(unsigned long port, unsigned int len)
{
if (!_IO_IS_VALID(port))
return NULL;
return (void __iomem *) (port+pci_io_base);
}
......
......@@ -418,10 +418,11 @@ void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
* Build a iommu_table structure. This contains a bit map which
* is used to manage allocation of the tce space.
*/
struct iommu_table *iommu_init_table(struct iommu_table *tbl)
struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
{
unsigned long sz;
static int welcomed = 0;
struct page *page;
/* Set aside 1/4 of the table for large allocations. */
tbl->it_halfpoint = tbl->it_size * 3 / 4;
......@@ -429,10 +430,10 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl)
/* number of bytes needed for the bitmap */
sz = (tbl->it_size + 7) >> 3;
tbl->it_map = (unsigned long *)__get_free_pages(GFP_ATOMIC, get_order(sz));
if (!tbl->it_map)
page = alloc_pages_node(nid, GFP_ATOMIC, get_order(sz));
if (!page)
panic("iommu_init_table: Can't allocate %ld bytes\n", sz);
tbl->it_map = page_address(page);
memset(tbl->it_map, 0, sz);
tbl->it_hint = 0;
......@@ -536,11 +537,12 @@ void iommu_unmap_single(struct iommu_table *tbl, dma_addr_t dma_handle,
* to the dma address (mapping) of the first page.
*/
void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
dma_addr_t *dma_handle, unsigned long mask, gfp_t flag)
dma_addr_t *dma_handle, unsigned long mask, gfp_t flag, int node)
{
void *ret = NULL;
dma_addr_t mapping;
unsigned int npages, order;
struct page *page;
size = PAGE_ALIGN(size);
npages = size >> PAGE_SHIFT;
......@@ -560,9 +562,10 @@ void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
return NULL;
/* Alloc enough pages (and possibly more) */
ret = (void *)__get_free_pages(flag, order);
if (!ret)
page = alloc_pages_node(node, flag, order);
if (!page)
return NULL;
ret = page_address(page);
memset(ret, 0, size);
/* Set up tces to cover the allocated range */
......@@ -570,9 +573,9 @@ void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
mask >> PAGE_SHIFT, order);
if (mapping == DMA_ERROR_CODE) {
free_pages((unsigned long)ret, order);
ret = NULL;
} else
*dma_handle = mapping;
return NULL;
}
*dma_handle = mapping;
return ret;
}
......
......@@ -47,6 +47,7 @@
#include <linux/cpumask.h>
#include <linux/profile.h>
#include <linux/bitops.h>
#include <linux/pci.h>
#include <asm/uaccess.h>
#include <asm/system.h>
......@@ -436,6 +437,30 @@ void do_softirq(void)
}
EXPORT_SYMBOL(do_softirq);
#ifdef CONFIG_PCI_MSI
int pci_enable_msi(struct pci_dev * pdev)
{
if (ppc_md.enable_msi)
return ppc_md.enable_msi(pdev);
else
return -1;
}
void pci_disable_msi(struct pci_dev * pdev)
{
if (ppc_md.disable_msi)
ppc_md.disable_msi(pdev);
}
void pci_scan_msi_device(struct pci_dev *dev) {}
int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec) {return -1;}
void pci_disable_msix(struct pci_dev *dev) {}
void msi_remove_pci_irq_vectors(struct pci_dev *dev) {}
void disable_msi_mode(struct pci_dev *dev, int pos, int type) {}
void pci_no_msi(void) {}
#endif
#ifdef CONFIG_PPC64
static int __init setup_noirqdistrib(char *str)
{
......
......@@ -521,10 +521,10 @@ static ssize_t lparcfg_write(struct file *file, const char __user * buf,
current_weight = (resource >> 5 * 8) & 0xFF;
pr_debug("%s: current_entitled = %lu, current_weight = %lu\n",
pr_debug("%s: current_entitled = %lu, current_weight = %u\n",
__FUNCTION__, current_entitled, current_weight);
pr_debug("%s: new_entitled = %lu, new_weight = %lu\n",
pr_debug("%s: new_entitled = %lu, new_weight = %u\n",
__FUNCTION__, *new_entitled_ptr, *new_weight_ptr);
retval = plpar_hcall_norets(H_SET_PPP, *new_entitled_ptr,
......
......@@ -21,6 +21,7 @@
#include <asm/machdep.h>
#include <asm/cacheflush.h>
#include <asm/paca.h>
#include <asm/lmb.h>
#include <asm/mmu.h>
#include <asm/sections.h> /* _end */
#include <asm/prom.h>
......@@ -335,7 +336,105 @@ static void __init export_htab_values(void)
of_node_put(node);
}
static struct property crashk_base_prop = {
.name = "linux,crashkernel-base",
.length = sizeof(unsigned long),
.value = (unsigned char *)&crashk_res.start,
};
static unsigned long crashk_size;
static struct property crashk_size_prop = {
.name = "linux,crashkernel-size",
.length = sizeof(unsigned long),
.value = (unsigned char *)&crashk_size,
};
static void __init export_crashk_values(void)
{
struct device_node *node;
struct property *prop;
node = of_find_node_by_path("/chosen");
if (!node)
return;
/* There might be existing crash kernel properties, but we can't
* be sure what's in them, so remove them. */
prop = of_find_property(node, "linux,crashkernel-base", NULL);
if (prop)
prom_remove_property(node, prop);
prop = of_find_property(node, "linux,crashkernel-size", NULL);
if (prop)
prom_remove_property(node, prop);
if (crashk_res.start != 0) {
prom_add_property(node, &crashk_base_prop);
crashk_size = crashk_res.end - crashk_res.start + 1;
prom_add_property(node, &crashk_size_prop);
}
of_node_put(node);
}
void __init kexec_setup(void)
{
export_htab_values();
export_crashk_values();
}
static int __init early_parse_crashk(char *p)
{
unsigned long size;
if (!p)
return 1;
size = memparse(p, &p);
if (*p == '@')
crashk_res.start = memparse(p + 1, &p);
else
crashk_res.start = KDUMP_KERNELBASE;
crashk_res.end = crashk_res.start + size - 1;
return 0;
}
early_param("crashkernel", early_parse_crashk);
void __init reserve_crashkernel(void)
{
unsigned long size;
if (crashk_res.start == 0)
return;
/* We might have got these values via the command line or the
* device tree, either way sanitise them now. */
size = crashk_res.end - crashk_res.start + 1;
if (crashk_res.start != KDUMP_KERNELBASE)
printk("Crash kernel location must be 0x%x\n",
KDUMP_KERNELBASE);
crashk_res.start = KDUMP_KERNELBASE;
size = PAGE_ALIGN(size);
crashk_res.end = crashk_res.start + size - 1;
/* Crash kernel trumps memory limit */
if (memory_limit && memory_limit <= crashk_res.end) {
memory_limit = crashk_res.end + 1;
printk("Adjusted memory limit for crashkernel, now 0x%lx\n",
memory_limit);
}
lmb_reserve(crashk_res.start, size);
}
int overlaps_crashkernel(unsigned long start, unsigned long size)
{
return (start + size) > crashk_res.start && start <= crashk_res.end;
}
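A quick worked example of the crashkernel= parsing added above (editor's illustration; the sizes are hypothetical, KDUMP_KERNELBASE is the 32 MB base this code enforces):

	/* booting with crashkernel=128M@32M */
	size             = memparse("128M@32M", &p);    /* 0x8000000, p -> "@32M" */
	crashk_res.start = memparse(p + 1, &p);         /* 0x2000000 (32 MB)      */
	crashk_res.end   = crashk_res.start + size - 1; /* 0x9ffffff              */

reserve_crashkernel() then forces the base back to KDUMP_KERNELBASE if anything else was requested, page-aligns the size, bumps memory_limit out of the way if necessary, and reserves the region with lmb_reserve().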
......@@ -216,7 +216,7 @@ _GLOBAL(call_setup_cpu)
lwz r4,0(r4)
add r4,r4,r3
lwz r5,CPU_SPEC_SETUP(r4)
cmpi 0,r5,0
cmpwi 0,r5,0
add r5,r5,r3
beqlr
mtctr r5
......
......@@ -482,7 +482,9 @@ _GLOBAL(identify_cpu)
sub r0,r3,r5
std r0,0(r4)
ld r4,CPU_SPEC_SETUP(r3)
cmpdi 0,r4,0
add r4,r4,r5
beqlr
ld r4,0(r4)
add r4,r4,r5
mtctr r4
......@@ -768,9 +770,6 @@ _GLOBAL(giveup_altivec)
#endif /* CONFIG_ALTIVEC */
_GLOBAL(__setup_cpu_power3)
blr
_GLOBAL(execve)
li r0,__NR_execve
sc
......
......@@ -204,7 +204,7 @@ static void nvram_print_partitions(char * label)
printk(KERN_WARNING "indx\t\tsig\tchks\tlen\tname\n");
list_for_each(p, &nvram_part->partition) {
tmp_part = list_entry(p, struct nvram_partition, partition);
printk(KERN_WARNING "%d \t%02x\t%02x\t%d\t%s\n",
printk(KERN_WARNING "%4d \t%02x\t%02x\t%d\t%s\n",
tmp_part->index, tmp_part->header.signature,
tmp_part->header.checksum, tmp_part->header.length,
tmp_part->header.name);
......
......@@ -1113,9 +1113,10 @@ check_for_io_childs(struct pci_bus *bus, struct resource* res, int *found_vga)
int i;
int rc = 0;
#define push_end(res, size) do { unsigned long __sz = (size) ; \
res->end = ((res->end + __sz) / (__sz + 1)) * (__sz + 1) + __sz; \
} while (0)
#define push_end(res, mask) do { \
BUG_ON((mask+1) & mask); \
res->end = (res->end + mask) | mask; \
} while (0)
list_for_each_entry(dev, &bus->devices, bus_list) {
u16 class = dev->class >> 8;
......
......@@ -42,14 +42,6 @@
unsigned long pci_probe_only = 1;
int pci_assign_all_buses = 0;
/*
* legal IO pages under MAX_ISA_PORT. This is to ensure we don't touch
* devices we don't have access to.
*/
unsigned long io_page_mask;
EXPORT_SYMBOL(io_page_mask);
#ifdef CONFIG_PPC_MULTIPLATFORM
static void fixup_resource(struct resource *res, struct pci_dev *dev);
static void do_bus_setup(struct pci_bus *bus);
......@@ -235,8 +227,10 @@ struct pci_controller * pcibios_alloc_controller(struct device_node *dev)
pci_setup_pci_controller(phb);
phb->arch_data = dev;
phb->is_dynamic = mem_init_done;
if (dev)
if (dev) {
PHB_SET_NODE(phb, of_node_to_nid(dev));
add_linux_pci_domain(dev, phb);
}
return phb;
}
......@@ -396,7 +390,7 @@ struct pci_dev *of_create_pci_dev(struct device_node *node,
dev->current_state = 4; /* unknown power state */
if (!strcmp(type, "pci")) {
if (!strcmp(type, "pci") || !strcmp(type, "pciex")) {
/* a PCI-PCI bridge */
dev->hdr_type = PCI_HEADER_TYPE_BRIDGE;
dev->rom_base_reg = PCI_ROM_ADDRESS1;
......@@ -605,7 +599,7 @@ static int __init pcibios_init(void)
iSeries_pcibios_init();
#endif
printk("PCI: Probing PCI hardware\n");
printk(KERN_DEBUG "PCI: Probing PCI hardware\n");
/* Scan all of the recorded PCI controllers. */
list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
......@@ -630,14 +624,14 @@ static int __init pcibios_init(void)
/* Cache the location of the ISA bridge (if we have one) */
ppc64_isabridge_dev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
if (ppc64_isabridge_dev != NULL)
printk("ISA bridge at %s\n", pci_name(ppc64_isabridge_dev));
printk(KERN_DEBUG "ISA bridge at %s\n", pci_name(ppc64_isabridge_dev));
#ifdef CONFIG_PPC_MULTIPLATFORM
/* map in PCI I/O space */
phbs_remap_io();
#endif
printk("PCI: Probing PCI hardware done\n");
printk(KERN_DEBUG "PCI: Probing PCI hardware done\n");
return 0;
}
......@@ -804,7 +798,7 @@ static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp,
else
prot |= _PAGE_GUARDED;
printk("PCI map for %s:%lx, prot: %lx\n", pci_name(dev), rp->start,
printk(KERN_DEBUG "PCI map for %s:%lx, prot: %lx\n", pci_name(dev), rp->start,
prot);
return __pgprot(prot);
......@@ -894,8 +888,8 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
return ret;
}
#ifdef CONFIG_PPC_MULTIPLATFORM
static ssize_t pci_show_devspec(struct device *dev, struct device_attribute *attr, char *buf)
static ssize_t pci_show_devspec(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct pci_dev *pdev;
struct device_node *np;
......@@ -907,13 +901,10 @@ static ssize_t pci_show_devspec(struct device *dev, struct device_attribute *att
return sprintf(buf, "%s", np->full_name);
}
static DEVICE_ATTR(devspec, S_IRUGO, pci_show_devspec, NULL);
#endif /* CONFIG_PPC_MULTIPLATFORM */
void pcibios_add_platform_entries(struct pci_dev *pdev)
{
#ifdef CONFIG_PPC_MULTIPLATFORM
device_create_file(&pdev->dev, &dev_attr_devspec);
#endif /* CONFIG_PPC_MULTIPLATFORM */
}
#ifdef CONFIG_PPC_MULTIPLATFORM
......@@ -1104,8 +1095,6 @@ void __init pci_setup_phb_io(struct pci_controller *hose, int primary)
pci_process_ISA_OF_ranges(isa_dn, hose->io_base_phys,
hose->io_base_virt);
of_node_put(isa_dn);
/* Allow all IO */
io_page_mask = -1;
}
}
......@@ -1212,7 +1201,7 @@ int remap_bus_range(struct pci_bus *bus)
return 1;
if (start_phys == 0)
return 1;
printk("mapping IO %lx -> %lx, size: %lx\n", start_phys, start_virt, size);
printk(KERN_DEBUG "mapping IO %lx -> %lx, size: %lx\n", start_phys, start_virt, size);
if (__ioremap_explicit(start_phys, start_virt, size,
_PAGE_NO_CACHE | _PAGE_GUARDED))
return 1;
......@@ -1232,27 +1221,13 @@ static void phbs_remap_io(void)
static void __devinit fixup_resource(struct resource *res, struct pci_dev *dev)
{
struct pci_controller *hose = pci_bus_to_host(dev->bus);
unsigned long start, end, mask, offset;
unsigned long offset;
if (res->flags & IORESOURCE_IO) {
offset = (unsigned long)hose->io_base_virt - pci_io_base;
start = res->start += offset;
end = res->end += offset;
/* Need to allow IO access to pages that are in the
ISA range */
if (start < MAX_ISA_PORT) {
if (end > MAX_ISA_PORT)
end = MAX_ISA_PORT;
start >>= PAGE_SHIFT;
end >>= PAGE_SHIFT;
/* get the range of pages for the map */
mask = ((1 << (end+1)) - 1) ^ ((1 << start) - 1);
io_page_mask |= mask;
}
res->start += offset;
res->end += offset;
} else if (res->flags & IORESOURCE_MEM) {
res->start += hose->pci_mem_offset;
res->end += hose->pci_mem_offset;
......@@ -1442,3 +1417,12 @@ long sys_pciconfig_iobase(long which, unsigned long in_bus,
return -EOPNOTSUPP;
}
#ifdef CONFIG_NUMA
int pcibus_to_node(struct pci_bus *bus)
{
struct pci_controller *phb = pci_bus_to_host(bus);
return phb->node;
}
EXPORT_SYMBOL(pcibus_to_node);
#endif
......@@ -82,13 +82,17 @@ static int pci_direct_dma_supported(struct device *dev, u64 mask)
return mask < 0x100000000ull;
}
static struct dma_mapping_ops pci_direct_ops = {
.alloc_coherent = pci_direct_alloc_coherent,
.free_coherent = pci_direct_free_coherent,
.map_single = pci_direct_map_single,
.unmap_single = pci_direct_unmap_single,
.map_sg = pci_direct_map_sg,
.unmap_sg = pci_direct_unmap_sg,
.dma_supported = pci_direct_dma_supported,
};
void __init pci_direct_iommu_init(void)
{
pci_dma_ops.alloc_coherent = pci_direct_alloc_coherent;
pci_dma_ops.free_coherent = pci_direct_free_coherent;
pci_dma_ops.map_single = pci_direct_map_single;
pci_dma_ops.unmap_single = pci_direct_unmap_single;
pci_dma_ops.map_sg = pci_direct_map_sg;
pci_dma_ops.unmap_sg = pci_direct_unmap_sg;
pci_dma_ops.dma_supported = pci_direct_dma_supported;
pci_dma_ops = pci_direct_ops;
}
......@@ -31,6 +31,7 @@
#include <asm/pci-bridge.h>
#include <asm/pSeries_reconfig.h>
#include <asm/ppc-pci.h>
#include <asm/firmware.h>
/*
* Traverse_func that inits the PCI fields of the device node.
......@@ -59,6 +60,11 @@ static void * __devinit update_dn_pci_info(struct device_node *dn, void *data)
pdn->busno = (regs[0] >> 16) & 0xff;
pdn->devfn = (regs[0] >> 8) & 0xff;
}
if (firmware_has_feature(FW_FEATURE_ISERIES)) {
u32 *busp = (u32 *)get_property(dn, "linux,subbus", NULL);
if (busp)
pdn->bussubno = *busp;
}
pdn->pci_ext_config_space = (type && *type == 1);
return NULL;
......
......@@ -44,16 +44,16 @@
*/
#define PCI_GET_DN(dev) ((struct device_node *)((dev)->sysdata))
static inline struct iommu_table *devnode_table(struct device *dev)
static inline struct iommu_table *device_to_table(struct device *hwdev)
{
struct pci_dev *pdev;
if (!dev) {
if (!hwdev) {
pdev = ppc64_isabridge_dev;
if (!pdev)
return NULL;
} else
pdev = to_pci_dev(dev);
pdev = to_pci_dev(hwdev);
return PCI_DN(PCI_GET_DN(pdev))->iommu_table;
}
......@@ -85,14 +85,15 @@ static inline unsigned long device_to_mask(struct device *hwdev)
static void *pci_iommu_alloc_coherent(struct device *hwdev, size_t size,
dma_addr_t *dma_handle, gfp_t flag)
{
return iommu_alloc_coherent(devnode_table(hwdev), size, dma_handle,
device_to_mask(hwdev), flag);
return iommu_alloc_coherent(device_to_table(hwdev), size, dma_handle,
device_to_mask(hwdev), flag,
pcibus_to_node(to_pci_dev(hwdev)->bus));
}
static void pci_iommu_free_coherent(struct device *hwdev, size_t size,
void *vaddr, dma_addr_t dma_handle)
{
iommu_free_coherent(devnode_table(hwdev), size, vaddr, dma_handle);
iommu_free_coherent(device_to_table(hwdev), size, vaddr, dma_handle);
}
/* Creates TCEs for a user provided buffer. The user buffer must be
......@@ -104,7 +105,7 @@ static void pci_iommu_free_coherent(struct device *hwdev, size_t size,
static dma_addr_t pci_iommu_map_single(struct device *hwdev, void *vaddr,
size_t size, enum dma_data_direction direction)
{
return iommu_map_single(devnode_table(hwdev), vaddr, size,
return iommu_map_single(device_to_table(hwdev), vaddr, size,
device_to_mask(hwdev), direction);
}
......@@ -112,27 +113,27 @@ static dma_addr_t pci_iommu_map_single(struct device *hwdev, void *vaddr,
static void pci_iommu_unmap_single(struct device *hwdev, dma_addr_t dma_handle,
size_t size, enum dma_data_direction direction)
{
iommu_unmap_single(devnode_table(hwdev), dma_handle, size, direction);
iommu_unmap_single(device_to_table(hwdev), dma_handle, size, direction);
}
static int pci_iommu_map_sg(struct device *pdev, struct scatterlist *sglist,
int nelems, enum dma_data_direction direction)
{
return iommu_map_sg(pdev, devnode_table(pdev), sglist,
return iommu_map_sg(pdev, device_to_table(pdev), sglist,
nelems, device_to_mask(pdev), direction);
}
static void pci_iommu_unmap_sg(struct device *pdev, struct scatterlist *sglist,
int nelems, enum dma_data_direction direction)
{
iommu_unmap_sg(devnode_table(pdev), sglist, nelems, direction);
iommu_unmap_sg(device_to_table(pdev), sglist, nelems, direction);
}
/* We support DMA to/from any memory page via the iommu */
static int pci_iommu_dma_supported(struct device *dev, u64 mask)
{
struct iommu_table *tbl = devnode_table(dev);
struct iommu_table *tbl = device_to_table(dev);
if (!tbl || tbl->it_offset > mask) {
printk(KERN_INFO "Warning: IOMMU table offset too big for device mask\n");
......@@ -147,13 +148,17 @@ static int pci_iommu_dma_supported(struct device *dev, u64 mask)
return 1;
}
struct dma_mapping_ops pci_iommu_ops = {
.alloc_coherent = pci_iommu_alloc_coherent,
.free_coherent = pci_iommu_free_coherent,
.map_single = pci_iommu_map_single,
.unmap_single = pci_iommu_unmap_single,
.map_sg = pci_iommu_map_sg,
.unmap_sg = pci_iommu_unmap_sg,
.dma_supported = pci_iommu_dma_supported,
};
void pci_iommu_init(void)
{
pci_dma_ops.alloc_coherent = pci_iommu_alloc_coherent;
pci_dma_ops.free_coherent = pci_iommu_free_coherent;
pci_dma_ops.map_single = pci_iommu_map_single;
pci_dma_ops.unmap_single = pci_iommu_unmap_single;
pci_dma_ops.map_sg = pci_iommu_map_sg;
pci_dma_ops.unmap_sg = pci_iommu_unmap_sg;
pci_dma_ops.dma_supported = pci_iommu_dma_supported;
pci_dma_ops = pci_iommu_ops;
}
......@@ -52,7 +52,7 @@ static int __init proc_ppc64_create(void)
if (!root)
return 1;
if (!machine_is(pseries) && !machine_is(cell))
if (!of_find_node_by_path("/rtas"))
return 0;
if (!proc_mkdir("rtas", root))
......
......@@ -708,6 +708,61 @@ int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
return put_user(val, (unsigned int __user *) adr);
}
int set_endian(struct task_struct *tsk, unsigned int val)
{
struct pt_regs *regs = tsk->thread.regs;
if ((val == PR_ENDIAN_LITTLE && !cpu_has_feature(CPU_FTR_REAL_LE)) ||
(val == PR_ENDIAN_PPC_LITTLE && !cpu_has_feature(CPU_FTR_PPC_LE)))
return -EINVAL;
if (regs == NULL)
return -EINVAL;
if (val == PR_ENDIAN_BIG)
regs->msr &= ~MSR_LE;
else if (val == PR_ENDIAN_LITTLE || val == PR_ENDIAN_PPC_LITTLE)
regs->msr |= MSR_LE;
else
return -EINVAL;
return 0;
}
int get_endian(struct task_struct *tsk, unsigned long adr)
{
struct pt_regs *regs = tsk->thread.regs;
unsigned int val;
if (!cpu_has_feature(CPU_FTR_PPC_LE) &&
!cpu_has_feature(CPU_FTR_REAL_LE))
return -EINVAL;
if (regs == NULL)
return -EINVAL;
if (regs->msr & MSR_LE) {
if (cpu_has_feature(CPU_FTR_REAL_LE))
val = PR_ENDIAN_LITTLE;
else
val = PR_ENDIAN_PPC_LITTLE;
} else
val = PR_ENDIAN_BIG;
return put_user(val, (unsigned int __user *)adr);
}
int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
{
tsk->thread.align_ctl = val;
return 0;
}
int get_unalign_ctl(struct task_struct *tsk, unsigned long adr)
{
return put_user(tsk->thread.align_ctl, (unsigned int __user *)adr);
}
#define TRUNC_PTR(x) ((typeof(x))(((unsigned long)(x)) & 0xffffffff))
int sys_clone(unsigned long clone_flags, unsigned long usp,
......
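The new set_endian()/get_endian() and set_unalign_ctl()/get_unalign_ctl() helpers above back a prctl() interface; a minimal userspace sketch (editor's sketch, assuming the PR_SET_ENDIAN/PR_GET_ENDIAN constants from linux/prctl.h that accompany this work):

#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
	int mode;

	/* Request true little-endian; set_endian() returns -EINVAL unless
	 * the CPU advertises CPU_FTR_REAL_LE. */
	if (prctl(PR_SET_ENDIAN, PR_ENDIAN_LITTLE) != 0)
		perror("PR_SET_ENDIAN");

	/* get_endian() writes the current mode back through the user pointer. */
	if (prctl(PR_GET_ENDIAN, &mode) == 0)
		printf("endian mode: %d\n", mode);
	return 0;
}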
......@@ -50,6 +50,7 @@
#include <asm/machdep.h>
#include <asm/pSeries_reconfig.h>
#include <asm/pci-bridge.h>
#include <asm/kexec.h>
#ifdef DEBUG
#define DBG(fmt...) printk(KERN_ERR fmt)
......@@ -836,6 +837,42 @@ static unsigned long __init unflatten_dt_node(unsigned long mem,
return mem;
}
static int __init early_parse_mem(char *p)
{
if (!p)
return 1;
memory_limit = PAGE_ALIGN(memparse(p, &p));
DBG("memory limit = 0x%lx\n", memory_limit);
return 0;
}
early_param("mem", early_parse_mem);
/*
* The device tree may be allocated below our memory limit, or inside the
* crash kernel region for kdump. If so, move it out now.
*/
static void move_device_tree(void)
{
unsigned long start, size;
void *p;
DBG("-> move_device_tree\n");
start = __pa(initial_boot_params);
size = initial_boot_params->totalsize;
if ((memory_limit && (start + size) > memory_limit) ||
overlaps_crashkernel(start, size)) {
p = __va(lmb_alloc_base(size, PAGE_SIZE, lmb.rmo_size));
memcpy(p, initial_boot_params, size);
initial_boot_params = (struct boot_param_header *)p;
DBG("Moved device tree to 0x%p\n", p);
}
DBG("<- move_device_tree\n");
}
/**
* unflattens the device-tree passed by the firmware, creating the
......@@ -911,7 +948,10 @@ static struct ibm_pa_feature {
{CPU_FTR_CTRL, 0, 0, 3, 0},
{CPU_FTR_NOEXECUTE, 0, 0, 6, 0},
{CPU_FTR_NODSISRALIGN, 0, 1, 1, 1},
#if 0
/* put this back once we know how to test if firmware does 64k IO */
{CPU_FTR_CI_LARGE_PAGE, 0, 1, 2, 0},
#endif
};
static void __init check_cpu_pa_features(unsigned long node)
......@@ -1070,6 +1110,7 @@ static int __init early_init_dt_scan_chosen(unsigned long node,
iommu_force_on = 1;
#endif
/* mem=x on the command line is the preferred mechanism */
lprop = of_get_flat_dt_prop(node, "linux,memory-limit", NULL);
if (lprop)
memory_limit = *lprop;
......@@ -1123,17 +1164,6 @@ static int __init early_init_dt_scan_chosen(unsigned long node,
DBG("Command line is: %s\n", cmd_line);
if (strstr(cmd_line, "mem=")) {
char *p, *q;
for (q = cmd_line; (p = strstr(q, "mem=")) != 0; ) {
q = p + 4;
if (p > cmd_line && p[-1] != ' ')
continue;
memory_limit = memparse(q, &q);
}
}
/* break now */
return 1;
}
......@@ -1237,9 +1267,17 @@ static void __init early_reserve_mem(void)
{
u64 base, size;
u64 *reserve_map;
unsigned long self_base;
unsigned long self_size;
reserve_map = (u64 *)(((unsigned long)initial_boot_params) +
initial_boot_params->off_mem_rsvmap);
/* before we do anything, lets reserve the dt blob */
self_base = __pa((unsigned long)initial_boot_params);
self_size = initial_boot_params->totalsize;
lmb_reserve(self_base, self_size);
#ifdef CONFIG_PPC32
/*
* Handle the case where we might be booting from an old kexec
......@@ -1254,6 +1292,9 @@ static void __init early_reserve_mem(void)
size_32 = *(reserve_map_32++);
if (size_32 == 0)
break;
/* skip if the reservation is for the blob */
if (base_32 == self_base && size_32 == self_size)
continue;
DBG("reserving: %x -> %x\n", base_32, size_32);
lmb_reserve(base_32, size_32);
}
......@@ -1265,6 +1306,9 @@ static void __init early_reserve_mem(void)
size = *(reserve_map++);
if (size == 0)
break;
/* skip if the reservation is for the blob */
if (base == self_base && size == self_size)
continue;
DBG("reserving: %llx -> %llx\n", base, size);
lmb_reserve(base, size);
}
......@@ -1292,18 +1336,26 @@ void __init early_init_devtree(void *params)
lmb_init();
of_scan_flat_dt(early_init_dt_scan_root, NULL);
of_scan_flat_dt(early_init_dt_scan_memory, NULL);
lmb_enforce_memory_limit(memory_limit);
lmb_analyze();
DBG("Phys. mem: %lx\n", lmb_phys_mem_size());
/* Save command line for /proc/cmdline and then parse parameters */
strlcpy(saved_command_line, cmd_line, COMMAND_LINE_SIZE);
parse_early_param();
/* Reserve LMB regions used by kernel, initrd, dt, etc... */
lmb_reserve(PHYSICAL_START, __pa(klimit) - PHYSICAL_START);
#ifdef CONFIG_CRASH_DUMP
lmb_reserve(0, KDUMP_RESERVE_LIMIT);
#endif
reserve_kdump_trampoline();
reserve_crashkernel();
early_reserve_mem();
lmb_enforce_memory_limit(memory_limit);
lmb_analyze();
DBG("Phys. mem: %lx\n", lmb_phys_mem_size());
/* We may need to relocate the flat tree, do it now.
* FIXME .. and the initrd too? */
move_device_tree();
DBG("Scanning CPUs ...\n");
/* Retreive CPU related informations from the flat tree
......@@ -2053,29 +2105,46 @@ int prom_update_property(struct device_node *np,
return 0;
}
#ifdef CONFIG_KEXEC
/* We may have allocated the flat device tree inside the crash kernel region
* in prom_init. If so we need to move it out into regular memory. */
void kdump_move_device_tree(void)
{
unsigned long start, end;
struct boot_param_header *new;
start = __pa((unsigned long)initial_boot_params);
end = start + initial_boot_params->totalsize;
if (end < crashk_res.start || start > crashk_res.end)
return;
new = (struct boot_param_header*)
__va(lmb_alloc(initial_boot_params->totalsize, PAGE_SIZE));
memcpy(new, initial_boot_params, initial_boot_params->totalsize);
/* Find the device node for a given logical cpu number, also returns the cpu
* local thread number (index in ibm,interrupt-server#s) if relevant and
* asked for (non NULL)
*/
struct device_node *of_get_cpu_node(int cpu, unsigned int *thread)
{
int hardid;
struct device_node *np;
initial_boot_params = new;
hardid = get_hard_smp_processor_id(cpu);
DBG("Flat device tree blob moved to %p\n", initial_boot_params);
for_each_node_by_type(np, "cpu") {
u32 *intserv;
unsigned int plen, t;
/* XXX should we unreserve the old DT? */
/* Check for ibm,ppc-interrupt-server#s. If it doesn't exist
* fallback to "reg" property and assume no threads
*/
intserv = (u32 *)get_property(np, "ibm,ppc-interrupt-server#s",
&plen);
if (intserv == NULL) {
u32 *reg = (u32 *)get_property(np, "reg", NULL);
if (reg == NULL)
continue;
if (*reg == hardid) {
if (thread)
*thread = 0;
return np;
}
} else {
plen /= sizeof(u32);
for (t = 0; t < plen; t++) {
if (hardid == intserv[t]) {
if (thread)
*thread = t;
return np;
}
}
}
}
return NULL;
}
#endif /* CONFIG_KEXEC */
......@@ -194,19 +194,12 @@ static int __initdata of_platform;
static char __initdata prom_cmd_line[COMMAND_LINE_SIZE];
static unsigned long __initdata prom_memory_limit;
static unsigned long __initdata alloc_top;
static unsigned long __initdata alloc_top_high;
static unsigned long __initdata alloc_bottom;
static unsigned long __initdata rmo_top;
static unsigned long __initdata ram_top;
#ifdef CONFIG_KEXEC
static unsigned long __initdata prom_crashk_base;
static unsigned long __initdata prom_crashk_size;
#endif
static struct mem_map_entry __initdata mem_reserve_map[MEM_RESERVE_MAP_SIZE];
static int __initdata mem_reserve_cnt;
......@@ -574,7 +567,7 @@ static void __init early_cmdline_parse(void)
if ((long)_prom->chosen > 0)
l = prom_getprop(_prom->chosen, "bootargs", p, COMMAND_LINE_SIZE-1);
#ifdef CONFIG_CMDLINE
if (l == 0) /* dbl check */
if (l <= 0 || p[0] == '\0') /* dbl check */
strlcpy(RELOC(prom_cmd_line),
RELOC(CONFIG_CMDLINE), sizeof(prom_cmd_line));
#endif /* CONFIG_CMDLINE */
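Put differently, the widened test means the built-in CONFIG_CMDLINE is now used not only when no "bootargs" property could be read (l <= 0) but also when the property exists yet holds an empty string.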
......@@ -593,45 +586,6 @@ static void __init early_cmdline_parse(void)
RELOC(iommu_force_on) = 1;
}
#endif
opt = strstr(RELOC(prom_cmd_line), RELOC("mem="));
if (opt) {
opt += 4;
RELOC(prom_memory_limit) = prom_memparse(opt, (const char **)&opt);
#ifdef CONFIG_PPC64
/* Align to 16 MB == size of ppc64 large page */
RELOC(prom_memory_limit) = ALIGN(RELOC(prom_memory_limit), 0x1000000);
#endif
}
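For reference, a small user-space sketch of the removed mem= handling; memparse() here is a hand-rolled stand-in for prom_memparse(), and the command line is made up. It parses a human-readable size and rounds it up to the 16 MB ppc64 large-page size.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((unsigned long)(a) - 1))

/* Stand-in for prom_memparse(): a number with an optional K/M/G suffix. */
static unsigned long memparse(const char *p, char **end)
{
	unsigned long val = strtoul(p, end, 0);

	switch (**end) {
	case 'G': case 'g': val <<= 10;	/* fall through */
	case 'M': case 'm': val <<= 10;	/* fall through */
	case 'K': case 'k': val <<= 10; (*end)++;
	}
	return val;
}

int main(void)
{
	const char *cmdline = "root=/dev/sda2 mem=768M quiet";
	const char *opt = strstr(cmdline, "mem=");
	unsigned long limit = 0;
	char *end;

	if (opt) {
		limit = memparse(opt + 4, &end);
		limit = ALIGN_UP(limit, 0x1000000);	/* 16 MB large page */
	}
	printf("memory limit: %#lx\n", limit);	/* prints 0x30000000 */
	return 0;
}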
#ifdef CONFIG_KEXEC
/*
* crashkernel=size@addr specifies the location to reserve for
* crash kernel.
*/
opt = strstr(RELOC(prom_cmd_line), RELOC("crashkernel="));
if (opt) {
opt += 12;
RELOC(prom_crashk_size) =
prom_memparse(opt, (const char **)&opt);
if (ALIGN(RELOC(prom_crashk_size), 0x1000000) !=
RELOC(prom_crashk_size)) {
prom_printf("Warning: crashkernel size is not "
"aligned to 16MB\n");
}
/*
* At present, the crash kernel always run at 32MB.
* Just ignore whatever user passed.
*/
RELOC(prom_crashk_base) = 0x2000000;
if (*opt == '@') {
prom_printf("Warning: PPC64 kdump kernel always runs "
"at 32 MB\n");
}
}
#endif
}
#ifdef CONFIG_PPC_PSERIES
......@@ -1115,29 +1069,6 @@ static void __init prom_init_mem(void)
RELOC(alloc_bottom) = PAGE_ALIGN(RELOC(prom_initrd_end));
}
/*
* If prom_memory_limit is set we reduce the upper limits *except* for
* alloc_top_high. This must be the real top of RAM so we can put
* TCE's up there.
*/
RELOC(alloc_top_high) = RELOC(ram_top);
if (RELOC(prom_memory_limit)) {
if (RELOC(prom_memory_limit) <= RELOC(alloc_bottom)) {
prom_printf("Ignoring mem=%x <= alloc_bottom.\n",
RELOC(prom_memory_limit));
RELOC(prom_memory_limit) = 0;
} else if (RELOC(prom_memory_limit) >= RELOC(ram_top)) {
prom_printf("Ignoring mem=%x >= ram_top.\n",
RELOC(prom_memory_limit));
RELOC(prom_memory_limit) = 0;
} else {
RELOC(ram_top) = RELOC(prom_memory_limit);
RELOC(rmo_top) = min(RELOC(rmo_top), RELOC(prom_memory_limit));
}
}
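The clamping rules being removed here can be summarised in a few lines of plain C (the names are borrowed from the code above purely for illustration): a limit at or below the allocation bottom, or at or above the top of RAM, is ignored; anything in between caps ram_top and, if needed, rmo_top.

static void apply_memory_limit(unsigned long *memory_limit,
			       unsigned long *ram_top,
			       unsigned long *rmo_top,
			       unsigned long alloc_bottom)
{
	if (*memory_limit == 0)
		return;				/* no mem= given */
	if (*memory_limit <= alloc_bottom || *memory_limit >= *ram_top) {
		*memory_limit = 0;		/* nonsensical limit: ignore it */
		return;
	}
	*ram_top = *memory_limit;		/* cap usable RAM */
	if (*rmo_top > *memory_limit)
		*rmo_top = *memory_limit;	/* RMO cannot exceed the limit */
}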
/*
* Setup our top alloc point, that is top of RMO or top of
* segment 0 when running non-LPAR.
......@@ -1150,20 +1081,14 @@ static void __init prom_init_mem(void)
RELOC(rmo_top) = RELOC(ram_top);
RELOC(rmo_top) = min(0x30000000ul, RELOC(rmo_top));
RELOC(alloc_top) = RELOC(rmo_top);
RELOC(alloc_top_high) = RELOC(ram_top);
prom_printf("memory layout at init:\n");
prom_printf(" memory_limit : %x (16 MB aligned)\n", RELOC(prom_memory_limit));
prom_printf(" alloc_bottom : %x\n", RELOC(alloc_bottom));
prom_printf(" alloc_top : %x\n", RELOC(alloc_top));
prom_printf(" alloc_top_hi : %x\n", RELOC(alloc_top_high));
prom_printf(" rmo_top : %x\n", RELOC(rmo_top));
prom_printf(" ram_top : %x\n", RELOC(ram_top));
#ifdef CONFIG_KEXEC
if (RELOC(prom_crashk_base)) {
prom_printf(" crashk_base : %x\n", RELOC(prom_crashk_base));
prom_printf(" crashk_size : %x\n", RELOC(prom_crashk_size));
}
#endif
}
......@@ -1349,16 +1274,10 @@ static void __init prom_initialize_tce_table(void)
reserve_mem(local_alloc_bottom, local_alloc_top - local_alloc_bottom);
if (RELOC(prom_memory_limit)) {
/*
* We align the start to a 16MB boundary so we can map
* the TCE area using large pages if possible.
* The end should be the top of RAM so no need to align it.
*/
RELOC(prom_tce_alloc_start) = _ALIGN_DOWN(local_alloc_bottom,
0x1000000);
RELOC(prom_tce_alloc_end) = local_alloc_top;
}
/* These are only really needed if there is a memory limit in
* effect, but we don't know so export them always. */
RELOC(prom_tce_alloc_start) = local_alloc_bottom;
RELOC(prom_tce_alloc_end) = local_alloc_top;
/* Flag the first invalid entry */
prom_debug("ending prom_initialize_tce_table\n");
......@@ -2041,11 +1960,7 @@ static void __init flatten_device_tree(void)
/* Version 16 is not backward compatible */
hdr->last_comp_version = 0x10;
/* Reserve the whole thing and copy the reserve map in, we
* also bump mem_reserve_cnt to cause further reservations to
* fail since it's too late.
*/
reserve_mem(RELOC(dt_header_start), hdr->totalsize);
/* Copy the reserve map in */
memcpy(rsvmap, RELOC(mem_reserve_map), sizeof(mem_reserve_map));
#ifdef DEBUG_PROM
......@@ -2058,6 +1973,9 @@ static void __init flatten_device_tree(void)
RELOC(mem_reserve_map)[i].size);
}
#endif
/* Bump mem_reserve_cnt to cause further reservations to fail
* since it's too late.
*/
RELOC(mem_reserve_cnt) = MEM_RESERVE_MAP_SIZE;
prom_printf("Device tree strings 0x%x -> 0x%x\n",
......@@ -2280,10 +2198,6 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
*/
prom_init_mem();
#ifdef CONFIG_KEXEC
if (RELOC(prom_crashk_base))
reserve_mem(RELOC(prom_crashk_base), RELOC(prom_crashk_size));
#endif
/*
* Determine which cpu is actually running right _now_
*/
......@@ -2317,10 +2231,6 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
/*
* Fill in some infos for use by the kernel later on
*/
if (RELOC(prom_memory_limit))
prom_setprop(_prom->chosen, "/chosen", "linux,memory-limit",
&RELOC(prom_memory_limit),
sizeof(prom_memory_limit));
#ifdef CONFIG_PPC64
if (RELOC(ppc64_iommu_off))
prom_setprop(_prom->chosen, "/chosen", "linux,iommu-off",
......@@ -2340,16 +2250,6 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
}
#endif
#ifdef CONFIG_KEXEC
if (RELOC(prom_crashk_base)) {
prom_setprop(_prom->chosen, "/chosen", "linux,crashkernel-base",
PTRRELOC(&prom_crashk_base),
sizeof(RELOC(prom_crashk_base)));
prom_setprop(_prom->chosen, "/chosen", "linux,crashkernel-size",
PTRRELOC(&prom_crashk_size),
sizeof(RELOC(prom_crashk_size)));
}
#endif
/*
* Fixup any known bugs in the device-tree
*/
......
......@@ -548,3 +548,28 @@ int of_pci_address_to_resource(struct device_node *dev, int bar,
return __of_address_to_resource(dev, addrp, size, flags, r);
}
EXPORT_SYMBOL_GPL(of_pci_address_to_resource);
void of_parse_dma_window(struct device_node *dn, unsigned char *dma_window_prop,
unsigned long *busno, unsigned long *phys, unsigned long *size)
{
u32 *dma_window, cells;
unsigned char *prop;
dma_window = (u32 *)dma_window_prop;
/* busno is always one cell */
*busno = *(dma_window++);
prop = get_property(dn, "ibm,#dma-address-cells", NULL);
if (!prop)
prop = get_property(dn, "#address-cells", NULL);
cells = prop ? *(u32 *)prop : prom_n_addr_cells(dn);
*phys = of_read_addr(dma_window, cells);
dma_window += cells;
prop = get_property(dn, "ibm,#dma-size-cells", NULL);
cells = prop ? *(u32 *)prop : prom_n_size_cells(dn);
*size = of_read_addr(dma_window, cells);
}
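A hedged usage sketch of of_parse_dma_window(); the caller, the "ibm,dma-window" property name, and the printout are assumptions about a typical pSeries bus node rather than part of the code above:

static void show_dma_window(struct device_node *dn)
{
	unsigned long busno, phys, size;
	unsigned char *prop;

	prop = get_property(dn, "ibm,dma-window", NULL);
	if (prop == NULL)
		return;				/* no DMA window on this node */

	of_parse_dma_window(dn, prop, &busno, &phys, &size);
	printk(KERN_INFO "%s: DMA window bus %lu, %#lx..%#lx\n",
	       dn->full_name, busno, phys, phys + size - 1);
}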
......@@ -404,7 +404,6 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
ret = ptrace_detach(child, data);
break;
#ifdef CONFIG_PPC64
case PPC_PTRACE_GETREGS: { /* Get GPRs 0 - 31. */
int i;
unsigned long *reg = &((unsigned long *)child->thread.regs)[0];
......@@ -468,7 +467,6 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
}
break;
}
#endif /* CONFIG_PPC64 */
#ifdef CONFIG_ALTIVEC
case PTRACE_GETVRREGS:
......
......@@ -313,7 +313,9 @@ unsigned long __init find_and_init_phbs(void)
for (node = of_get_next_child(root, NULL);
node != NULL;
node = of_get_next_child(root, node)) {
if (node->type == NULL || strcmp(node->type, "pci") != 0)
if (node->type == NULL || (strcmp(node->type, "pci") != 0 &&
strcmp(node->type, "pciex") != 0))
continue;
phb = pcibios_alloc_controller(node);
......
......@@ -2,5 +2,8 @@
#define _POWERPC_KERNEL_SETUP_H
void check_for_initrd(void);
void do_init_bootmem(void);
void setup_panic(void);
extern int do_early_xmon;
#endif /* _POWERPC_KERNEL_SETUP_H */
......@@ -492,7 +492,7 @@ int __devinit __cpu_up(unsigned int cpu)
* -- Cort
*/
if (system_state < SYSTEM_RUNNING)
for (c = 5000; c && !cpu_callin_map[cpu]; c--)
for (c = 50000; c && !cpu_callin_map[cpu]; c--)
udelay(100);
#ifdef CONFIG_HOTPLUG_CPU
else
......
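For scale: the bump from 5000 to 50000 iterations of udelay(100) stretches the boot-time callin wait from about 5000 x 100 µs = 0.5 s to 50000 x 100 µs = 5 s before __cpu_up gives up on a secondary CPU.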
......@@ -53,12 +53,12 @@ fpenable:
stfd fr31,8(r1)
LDCONST(fr1, fpzero)
mffs fr31
mtfsf 0xff,fr1
MTFSF_L(fr1)
blr
fpdisable:
mtlr r12
mtfsf 0xff,fr31
MTFSF_L(fr31)
lfd fr31,8(r1)
lfd fr1,16(r1)
lfd fr0,24(r1)
......