Commit b7c8c194 authored by Linus Torvalds

Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc

Pull more powerpc updates from Ben Herrenschmidt:
 "Here are the remaining bits I was mentioning earlier.  Mostly bug
  fixes and new selftests from Michael (yay !).  He also removed the WSP
  platform and A2 core support which were dead before release, so less
  clutter.

  One little "feature" I snuck in is the doorbell IPI support for
  non-virtualized P8 which speeds up IPIs significantly between threads
  of a core"

* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc: (34 commits)
  powerpc/book3s: Fix some ABIv2 issues in machine check code
  powerpc/book3s: Fix guest MC delivery mechanism to avoid soft lockups in guest.
  powerpc/book3s: Increment the mce counter during machine_check_early call.
  powerpc/book3s: Add stack overflow check in machine check handler.
  powerpc/book3s: Fix machine check handling for unhandled errors
  powerpc/eeh: Dump PE location code
  powerpc/powernv: Enable POWER8 doorbell IPIs
  powerpc/cpuidle: Only clear LPCR decrementer wakeup bit on fast sleep entry
  powerpc/powernv: Fix killed EEH event
  powerpc: fix typo 'CONFIG_PMAC'
  powerpc: fix typo 'CONFIG_PPC_CPU'
  powerpc/powernv: Don't escalate non-existing frozen PE
  powerpc/eeh: Report frozen parent PE prior to child PE
  powerpc/eeh: Clear frozen state for child PE
  powerpc/powernv: Reduce panic timeout from 180s to 10s
  powerpc/xmon: avoid format string leaking to printk
  selftests/powerpc: Add tests of PMU EBBs
  selftests/powerpc: Add support for skipping tests
  selftests/powerpc: Put the test in a separate process group
  selftests/powerpc: Fix instruction loop for ABIv2 (LE)
  ...
parents 88bbfb4a ad718622
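The doorbell IPI feature called out in the pull message lets one POWER8 hardware thread signal a sibling thread in the same core directly with a msgsndp doorbell, instead of taking a round trip through the XICS interrupt controller. Roughly, the fast path looks like the sketch below — illustrative only: cpu_same_core() and icp_message_pass() are stand-in names, not the kernel's actual helpers, though ppc_msgsnd()/ppc_msgsnd_sync() do exist in the powerpc doorbell code.

/* Sketch of a POWER8 intra-core doorbell IPI fast path (not the
 * literal kernel code; helper names are illustrative). */
static void cause_ipi(int cpu, unsigned long data)
{
	if (cpu_same_core(smp_processor_id(), cpu)) {
		/* Sibling thread: ring a core doorbell with msgsndp. */
		ppc_msgsnd_sync();	/* order prior stores vs the IPI */
		ppc_msgsnd(PPC_DBELL_CORE, 0, cpu_thread_in_core(cpu));
	} else {
		/* Other core: fall back to the interrupt controller. */
		icp_message_pass(cpu, data);
	}
}

This is why the speed-up is quoted specifically for IPIs between threads of a core: the doorbell never leaves the core, while an XICS IPI must go out to the interrupt controller and back.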
@@ -235,11 +235,6 @@ config PPC_EARLY_DEBUG_USBGECKO
 	  Select this to enable early debugging for Nintendo GameCube/Wii
 	  consoles via an external USB Gecko adapter.

-config PPC_EARLY_DEBUG_WSP
-	bool "Early debugging via WSP's internal UART"
-	depends on PPC_WSP
-	select PPC_UDBG_16550
-
 config PPC_EARLY_DEBUG_PS3GELIC
 	bool "Early debugging through the PS3 Ethernet port"
 	depends on PPC_PS3
...
CONFIG_PPC64=y
CONFIG_PPC_BOOK3E_64=y
# CONFIG_VIRT_CPU_ACCOUNTING_NATIVE is not set
CONFIG_SMP=y
CONFIG_NR_CPUS=256
CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_TASKSTATS=y
CONFIG_TASK_DELAY_ACCT=y
CONFIG_TASK_XACCT=y
CONFIG_TASK_IO_ACCOUNTING=y
CONFIG_AUDIT=y
CONFIG_AUDITSYSCALL=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=19
CONFIG_CGROUPS=y
CONFIG_CGROUP_DEVICE=y
CONFIG_CPUSETS=y
CONFIG_CGROUP_CPUACCT=y
CONFIG_RESOURCE_COUNTERS=y
CONFIG_CGROUP_MEMCG=y
CONFIG_CGROUP_MEMCG_SWAP=y
CONFIG_NAMESPACES=y
CONFIG_RELAY=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_INITRAMFS_SOURCE=""
CONFIG_RD_BZIP2=y
CONFIG_RD_LZMA=y
CONFIG_INITRAMFS_COMPRESSION_GZIP=y
CONFIG_KALLSYMS_ALL=y
CONFIG_EMBEDDED=y
CONFIG_PERF_EVENTS=y
CONFIG_PROFILING=y
CONFIG_OPROFILE=y
CONFIG_KPROBES=y
CONFIG_MODULES=y
CONFIG_MODULE_FORCE_LOAD=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
CONFIG_SCOM_DEBUGFS=y
CONFIG_PPC_A2_DD2=y
CONFIG_KVM_GUEST=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_HZ_100=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_BINFMT_MISC=y
CONFIG_NUMA=y
# CONFIG_MIGRATION is not set
CONFIG_PPC_64K_PAGES=y
CONFIG_SCHED_SMT=y
CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE=""
# CONFIG_SECCOMP is not set
CONFIG_PCIEPORTBUS=y
# CONFIG_PCIEASPM is not set
CONFIG_PCI_MSI=y
CONFIG_PACKET=y
CONFIG_UNIX=y
CONFIG_XFRM_USER=m
CONFIG_XFRM_SUB_POLICY=y
CONFIG_XFRM_STATISTICS=y
CONFIG_NET_KEY=m
CONFIG_NET_KEY_MIGRATE=y
CONFIG_INET=y
CONFIG_IP_MULTICAST=y
CONFIG_IP_ADVANCED_ROUTER=y
CONFIG_IP_ROUTE_MULTIPATH=y
CONFIG_IP_ROUTE_VERBOSE=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_NET_IPIP=y
CONFIG_IP_MROUTE=y
CONFIG_IP_PIMSM_V1=y
CONFIG_IP_PIMSM_V2=y
CONFIG_SYN_COOKIES=y
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
CONFIG_INET_IPCOMP=m
CONFIG_IPV6=y
CONFIG_IPV6_PRIVACY=y
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_IPV6_ROUTE_INFO=y
CONFIG_IPV6_OPTIMISTIC_DAD=y
CONFIG_INET6_AH=y
CONFIG_INET6_ESP=y
CONFIG_INET6_IPCOMP=y
CONFIG_IPV6_MIP6=y
CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=y
CONFIG_IPV6_TUNNEL=y
CONFIG_IPV6_MULTIPLE_TABLES=y
CONFIG_IPV6_SUBTREES=y
CONFIG_IPV6_MROUTE=y
CONFIG_IPV6_PIMSM_V2=y
CONFIG_NETFILTER=y
CONFIG_NF_CONNTRACK=m
CONFIG_NF_CONNTRACK_EVENTS=y
CONFIG_NF_CT_PROTO_UDPLITE=m
CONFIG_NF_CONNTRACK_FTP=m
CONFIG_NF_CONNTRACK_IRC=m
CONFIG_NF_CONNTRACK_TFTP=m
CONFIG_NF_CT_NETLINK=m
CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
CONFIG_NETFILTER_XT_TARGET_MARK=m
CONFIG_NETFILTER_XT_TARGET_NFLOG=m
CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
CONFIG_NETFILTER_XT_MATCH_COMMENT=m
CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
CONFIG_NETFILTER_XT_MATCH_DCCP=m
CONFIG_NETFILTER_XT_MATCH_DSCP=m
CONFIG_NETFILTER_XT_MATCH_ESP=m
CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
CONFIG_NETFILTER_XT_MATCH_HELPER=m
CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
CONFIG_NETFILTER_XT_MATCH_LENGTH=m
CONFIG_NETFILTER_XT_MATCH_LIMIT=m
CONFIG_NETFILTER_XT_MATCH_MAC=m
CONFIG_NETFILTER_XT_MATCH_MARK=m
CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
CONFIG_NETFILTER_XT_MATCH_OWNER=m
CONFIG_NETFILTER_XT_MATCH_POLICY=m
CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
CONFIG_NETFILTER_XT_MATCH_QUOTA=m
CONFIG_NETFILTER_XT_MATCH_RATEEST=m
CONFIG_NETFILTER_XT_MATCH_REALM=m
CONFIG_NETFILTER_XT_MATCH_RECENT=m
CONFIG_NETFILTER_XT_MATCH_SCTP=m
CONFIG_NETFILTER_XT_MATCH_STATE=m
CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
CONFIG_NETFILTER_XT_MATCH_STRING=m
CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
CONFIG_NETFILTER_XT_MATCH_TIME=m
CONFIG_NETFILTER_XT_MATCH_U32=m
CONFIG_NF_CONNTRACK_IPV4=m
CONFIG_IP_NF_QUEUE=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
CONFIG_IP_NF_MATCH_TTL=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
CONFIG_IP_NF_TARGET_LOG=m
CONFIG_IP_NF_TARGET_ULOG=m
CONFIG_NF_NAT=m
CONFIG_IP_NF_TARGET_MASQUERADE=m
CONFIG_IP_NF_TARGET_NETMAP=m
CONFIG_IP_NF_TARGET_REDIRECT=m
CONFIG_NET_TCPPROBE=y
# CONFIG_WIRELESS is not set
CONFIG_NET_9P=y
CONFIG_NET_9P_DEBUG=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_DEVTMPFS=y
CONFIG_MTD=y
CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_ADV_OPTIONS=y
CONFIG_MTD_CFI_LE_BYTE_SWAP=y
CONFIG_MTD_CFI_INTELEXT=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_CFI_STAA=y
CONFIG_MTD_PHYSMAP_OF=y
CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_CRYPTOLOOP=y
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=65536
CONFIG_CDROM_PKTCDVD=y
CONFIG_MISC_DEVICES=y
CONFIG_BLK_DEV_SD=y
CONFIG_BLK_DEV_SR=y
CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=y
CONFIG_SCSI_MULTI_LUN=y
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_SPI_ATTRS=y
CONFIG_SCSI_FC_ATTRS=y
CONFIG_SCSI_ISCSI_ATTRS=m
CONFIG_SCSI_SAS_ATTRS=m
CONFIG_SCSI_SRP_ATTRS=y
CONFIG_ATA=y
CONFIG_SATA_AHCI=y
CONFIG_SATA_SIL24=y
CONFIG_SATA_MV=y
CONFIG_SATA_SIL=y
CONFIG_PATA_CMD64X=y
CONFIG_PATA_MARVELL=y
CONFIG_PATA_SIL680=y
CONFIG_MD=y
CONFIG_BLK_DEV_MD=y
CONFIG_MD_LINEAR=y
CONFIG_BLK_DEV_DM=y
CONFIG_DM_CRYPT=y
CONFIG_DM_SNAPSHOT=y
CONFIG_DM_MIRROR=y
CONFIG_DM_ZERO=y
CONFIG_DM_UEVENT=y
CONFIG_NETDEVICES=y
CONFIG_TUN=y
CONFIG_E1000E=y
CONFIG_TIGON3=y
# CONFIG_WLAN is not set
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
# CONFIG_VT is not set
CONFIG_DEVPTS_MULTIPLE_INSTANCES=y
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_HW_RANDOM=y
CONFIG_RAW_DRIVER=y
CONFIG_MAX_RAW_DEVS=1024
# CONFIG_HWMON is not set
# CONFIG_VGA_ARB is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_EDAC=y
CONFIG_EDAC_MM_EDAC=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_DS1511=y
CONFIG_RTC_DRV_DS1553=y
CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT2_FS_POSIX_ACL=y
CONFIG_EXT2_FS_SECURITY=y
CONFIG_EXT2_FS_XIP=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
CONFIG_EXT3_FS_POSIX_ACL=y
CONFIG_EXT3_FS_SECURITY=y
CONFIG_EXT4_FS=y
# CONFIG_DNOTIFY is not set
CONFIG_FUSE_FS=y
CONFIG_ISO9660_FS=y
CONFIG_JOLIET=y
CONFIG_ZISOFS=y
CONFIG_UDF_FS=m
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
CONFIG_CONFIGFS_FS=m
CONFIG_CRAMFS=y
CONFIG_NFS_FS=y
CONFIG_NFS_V3=y
CONFIG_NFS_V3_ACL=y
CONFIG_NFS_V4=y
CONFIG_NFS_V4_1=y
CONFIG_ROOT_NFS=y
CONFIG_CIFS=y
CONFIG_CIFS_WEAK_PW_HASH=y
CONFIG_CIFS_XATTR=y
CONFIG_CIFS_POSIX=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ASCII=y
CONFIG_NLS_ISO8859_1=y
CONFIG_CRC_CCITT=m
CONFIG_CRC_T10DIF=y
CONFIG_LIBCRC32C=m
CONFIG_PRINTK_TIME=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_STRIP_ASM_SYMS=y
CONFIG_DETECT_HUNG_TASK=y
# CONFIG_SCHED_DEBUG is not set
CONFIG_DEBUG_INFO=y
CONFIG_FTRACE_SYSCALLS=y
CONFIG_PPC_EMULATED_STATS=y
CONFIG_XMON=y
CONFIG_XMON_DEFAULT=y
CONFIG_IRQ_DOMAIN_DEBUG=y
CONFIG_PPC_EARLY_DEBUG=y
CONFIG_KEYS_DEBUG_PROC_KEYS=y
CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CCM=m
CONFIG_CRYPTO_GCM=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_SHA256=m
CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_AES=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAST5=m
CONFIG_CRYPTO_CAST6=m
CONFIG_CRYPTO_KHAZAD=m
CONFIG_CRYPTO_SALSA20=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_LZO=m
# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_VIRTUALIZATION=y
@@ -489,7 +489,6 @@ typedef struct scc_trans {
 #define FCC_GFMR_TCI	((uint)0x20000000)
 #define FCC_GFMR_TRX	((uint)0x10000000)
 #define FCC_GFMR_TTX	((uint)0x08000000)
-#define FCC_GFMR_TTX	((uint)0x08000000)
 #define FCC_GFMR_CDP	((uint)0x04000000)
 #define FCC_GFMR_CTSP	((uint)0x02000000)
 #define FCC_GFMR_CDS	((uint)0x01000000)
...
@@ -254,6 +254,7 @@ void *eeh_pe_traverse(struct eeh_pe *root,
 void *eeh_pe_dev_traverse(struct eeh_pe *root,
			  eeh_traverse_func fn, void *flag);
 void eeh_pe_restore_bars(struct eeh_pe *pe);
+const char *eeh_pe_loc_get(struct eeh_pe *pe);
 struct pci_bus *eeh_pe_bus_get(struct eeh_pe *pe);

 void *eeh_dev_init(struct device_node *dn, void *data);
...
@@ -33,7 +33,7 @@ struct eeh_event {
 int eeh_event_init(void);
 int eeh_send_failure_event(struct eeh_pe *pe);
-void eeh_remove_event(struct eeh_pe *pe);
+void eeh_remove_event(struct eeh_pe *pe, bool force);
 void eeh_handle_event(struct eeh_pe *pe);

 #endif /* __KERNEL__ */
...
@@ -223,10 +223,6 @@ typedef struct {
	unsigned int	id;
	unsigned int	active;
	unsigned long	vdso_base;
-#ifdef CONFIG_PPC_ICSWX
-	struct spinlock *cop_lockp;	/* guard cop related stuff */
-	unsigned long acop;		/* mask of enabled coprocessor types */
-#endif /* CONFIG_PPC_ICSWX */
 #ifdef CONFIG_PPC_MM_SLICES
	u64 low_slices_psize;	/* SLB page size encodings */
	u64 high_slices_psize;	/* 4 bits per slice for now */
...
@@ -599,9 +599,9 @@ enum {
 };

 struct OpalIoPhbErrorCommon {
-	uint32_t version;
-	uint32_t ioType;
-	uint32_t len;
+	__be32 version;
+	__be32 ioType;
+	__be32 len;
 };

 struct OpalIoP7IOCPhbErrorData {
@@ -666,64 +666,64 @@ struct OpalIoP7IOCPhbErrorData {
 struct OpalIoPhb3ErrorData {
	struct OpalIoPhbErrorCommon common;

-	uint32_t brdgCtl;
+	__be32 brdgCtl;

	/* PHB3 UTL regs */
-	uint32_t portStatusReg;
-	uint32_t rootCmplxStatus;
-	uint32_t busAgentStatus;
+	__be32 portStatusReg;
+	__be32 rootCmplxStatus;
+	__be32 busAgentStatus;

	/* PHB3 cfg regs */
-	uint32_t deviceStatus;
-	uint32_t slotStatus;
-	uint32_t linkStatus;
-	uint32_t devCmdStatus;
-	uint32_t devSecStatus;
+	__be32 deviceStatus;
+	__be32 slotStatus;
+	__be32 linkStatus;
+	__be32 devCmdStatus;
+	__be32 devSecStatus;

	/* cfg AER regs */
-	uint32_t rootErrorStatus;
-	uint32_t uncorrErrorStatus;
-	uint32_t corrErrorStatus;
-	uint32_t tlpHdr1;
-	uint32_t tlpHdr2;
-	uint32_t tlpHdr3;
-	uint32_t tlpHdr4;
-	uint32_t sourceId;
-	uint32_t rsv3;
+	__be32 rootErrorStatus;
+	__be32 uncorrErrorStatus;
+	__be32 corrErrorStatus;
+	__be32 tlpHdr1;
+	__be32 tlpHdr2;
+	__be32 tlpHdr3;
+	__be32 tlpHdr4;
+	__be32 sourceId;
+	__be32 rsv3;

	/* Record data about the call to allocate a buffer */
-	uint64_t errorClass;
-	uint64_t correlator;
+	__be64 errorClass;
+	__be64 correlator;

-	uint64_t nFir;			/* 000 */
-	uint64_t nFirMask;		/* 003 */
-	uint64_t nFirWOF;		/* 008 */
+	__be64 nFir;			/* 000 */
+	__be64 nFirMask;		/* 003 */
+	__be64 nFirWOF;			/* 008 */

	/* PHB3 MMIO Error Regs */
-	uint64_t phbPlssr;		/* 120 */
-	uint64_t phbCsr;		/* 110 */
-	uint64_t lemFir;		/* C00 */
-	uint64_t lemErrorMask;		/* C18 */
-	uint64_t lemWOF;		/* C40 */
-	uint64_t phbErrorStatus;	/* C80 */
-	uint64_t phbFirstErrorStatus;	/* C88 */
-	uint64_t phbErrorLog0;		/* CC0 */
-	uint64_t phbErrorLog1;		/* CC8 */
-	uint64_t mmioErrorStatus;	/* D00 */
-	uint64_t mmioFirstErrorStatus;	/* D08 */
-	uint64_t mmioErrorLog0;		/* D40 */
-	uint64_t mmioErrorLog1;		/* D48 */
-	uint64_t dma0ErrorStatus;	/* D80 */
-	uint64_t dma0FirstErrorStatus;	/* D88 */
-	uint64_t dma0ErrorLog0;		/* DC0 */
-	uint64_t dma0ErrorLog1;		/* DC8 */
-	uint64_t dma1ErrorStatus;	/* E00 */
-	uint64_t dma1FirstErrorStatus;	/* E08 */
-	uint64_t dma1ErrorLog0;		/* E40 */
-	uint64_t dma1ErrorLog1;		/* E48 */
-	uint64_t pestA[OPAL_PHB3_NUM_PEST_REGS];
-	uint64_t pestB[OPAL_PHB3_NUM_PEST_REGS];
+	__be64 phbPlssr;		/* 120 */
+	__be64 phbCsr;			/* 110 */
+	__be64 lemFir;			/* C00 */
+	__be64 lemErrorMask;		/* C18 */
+	__be64 lemWOF;			/* C40 */
+	__be64 phbErrorStatus;		/* C80 */
+	__be64 phbFirstErrorStatus;	/* C88 */
+	__be64 phbErrorLog0;		/* CC0 */
+	__be64 phbErrorLog1;		/* CC8 */
+	__be64 mmioErrorStatus;		/* D00 */
+	__be64 mmioFirstErrorStatus;	/* D08 */
+	__be64 mmioErrorLog0;		/* D40 */
+	__be64 mmioErrorLog1;		/* D48 */
+	__be64 dma0ErrorStatus;		/* D80 */
+	__be64 dma0FirstErrorStatus;	/* D88 */
+	__be64 dma0ErrorLog0;		/* DC0 */
+	__be64 dma0ErrorLog1;		/* DC8 */
+	__be64 dma1ErrorStatus;		/* E00 */
+	__be64 dma1FirstErrorStatus;	/* E08 */
+	__be64 dma1ErrorLog0;		/* E40 */
+	__be64 dma1ErrorLog1;		/* E48 */
+	__be64 pestA[OPAL_PHB3_NUM_PEST_REGS];
+	__be64 pestB[OPAL_PHB3_NUM_PEST_REGS];
 };

 enum {
@@ -851,8 +851,8 @@ int64_t opal_pci_mask_pe_error(uint64_t phb_id, uint16_t pe_number, uint8_t erro
 int64_t opal_set_slot_led_status(uint64_t phb_id, uint64_t slot_id, uint8_t led_type, uint8_t led_action);
 int64_t opal_get_epow_status(__be64 *status);
 int64_t opal_set_system_attention_led(uint8_t led_action);
-int64_t opal_pci_next_error(uint64_t phb_id, uint64_t *first_frozen_pe,
-			    uint16_t *pci_error_type, uint16_t *severity);
+int64_t opal_pci_next_error(uint64_t phb_id, __be64 *first_frozen_pe,
+			    __be16 *pci_error_type, __be16 *severity);
 int64_t opal_pci_poll(uint64_t phb_id);
 int64_t opal_return_cpu(void);
 int64_t opal_reinit_cpus(uint64_t flags);
...
@@ -110,15 +110,6 @@
 #define TLB1_UR			ASM_CONST(0x0000000000000002)
 #define TLB1_SR			ASM_CONST(0x0000000000000001)

-#ifdef CONFIG_PPC_EARLY_DEBUG_WSP
-#define WSP_UART_PHYS	0xffc000c000
-/* This needs to be careful chosen to hit a !0 congruence class
- * in the TLB since we bolt it in way 3, which is already occupied
- * by our linear mapping primary bolted entry in CC 0.
- */
-#define WSP_UART_VIRT	0xf000000000001000
-#endif
-
 /* A2 erativax attributes definitions */
 #define ERATIVAX_RS_IS_ALL		0x000
 #define ERATIVAX_RS_IS_TID		0x040
...
@@ -16,13 +16,15 @@ struct thread_struct;
 extern struct task_struct *_switch(struct thread_struct *prev,
				   struct thread_struct *next);
 #ifdef CONFIG_PPC_BOOK3S_64
-static inline void save_tar(struct thread_struct *prev)
+static inline void save_early_sprs(struct thread_struct *prev)
 {
	if (cpu_has_feature(CPU_FTR_ARCH_207S))
		prev->tar = mfspr(SPRN_TAR);
+	if (cpu_has_feature(CPU_FTR_DSCR))
+		prev->dscr = mfspr(SPRN_DSCR);
 }
 #else
-static inline void save_tar(struct thread_struct *prev) {}
+static inline void save_early_sprs(struct thread_struct *prev) {}
 #endif

 extern void enable_kernel_fp(void);
@@ -84,6 +86,8 @@ static inline void clear_task_ebb(struct task_struct *t)
 {
 #ifdef CONFIG_PPC_BOOK3S_64
	/* EBB perf events are not inherited, so clear all EBB state. */
+	t->thread.ebbrr = 0;
+	t->thread.ebbhr = 0;
	t->thread.bescr = 0;
	t->thread.mmcr2 = 0;
	t->thread.mmcr0 = 0;
...
/*
* Copyright 2011 Michael Ellerman, IBM Corp.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#ifndef __ASM_POWERPC_WSP_H
#define __ASM_POWERPC_WSP_H
extern int wsp_get_chip_id(struct device_node *dn);
#endif /* __ASM_POWERPC_WSP_H */
@@ -41,5 +41,6 @@
 #define PPC_FEATURE2_EBB		0x10000000
 #define PPC_FEATURE2_ISEL		0x08000000
 #define PPC_FEATURE2_TAR		0x04000000
+#define PPC_FEATURE2_VEC_CRYPTO		0x02000000

 #endif /* _UAPI__ASM_POWERPC_CPUTABLE_H */
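The new PPC_FEATURE2_VEC_CRYPTO bit is exported to userspace through the AT_HWCAP2 auxiliary vector entry. A minimal detection sketch using glibc's getauxval() (assumes a libc new enough to define AT_HWCAP2):

#include <stdio.h>
#include <sys/auxv.h>			/* getauxval(), AT_HWCAP2 */

#ifndef PPC_FEATURE2_VEC_CRYPTO
#define PPC_FEATURE2_VEC_CRYPTO	0x02000000	/* value from the hunk above */
#endif

int main(void)
{
	unsigned long hwcap2 = getauxval(AT_HWCAP2);

	if (hwcap2 & PPC_FEATURE2_VEC_CRYPTO)
		puts("in-core vector crypto facility available");
	else
		puts("no vector crypto facility");
	return 0;
}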
@@ -43,7 +43,6 @@ obj-$(CONFIG_PPC_BOOK3S_64)	+= cpu_setup_power.o
 obj-$(CONFIG_PPC_BOOK3S_64)	+= mce.o mce_power.o
 obj64-$(CONFIG_RELOCATABLE)	+= reloc_64.o
 obj-$(CONFIG_PPC_BOOK3E_64)	+= exceptions-64e.o idle_book3e.o
-obj-$(CONFIG_PPC_A2)		+= cpu_setup_a2.o
 obj-$(CONFIG_PPC64)		+= vdso64/
 obj-$(CONFIG_ALTIVEC)		+= vecemu.o
 obj-$(CONFIG_PPC_970_NAP)	+= idle_power4.o
...
/*
* A2 specific assembly support code
*
* Copyright 2009 Ben Herrenschmidt, IBM Corp.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <asm/asm-offsets.h>
#include <asm/ppc_asm.h>
#include <asm/ppc-opcode.h>
#include <asm/processor.h>
#include <asm/reg_a2.h>
#include <asm/reg.h>
#include <asm/thread_info.h>
/*
* Disable thdid and class fields in ERATs to bump PID to full 14 bits capacity.
* This also prevents external LPID accesses but that isn't a problem when not a
* guest. Under PV, this setting will be ignored and MMUCR will return the right
* number of PID bits we can use.
*/
#define MMUCR1_EXTEND_PID \
(MMUCR1_ICTID | MMUCR1_ITTID | MMUCR1_DCTID | \
MMUCR1_DTTID | MMUCR1_DCCD)
/*
* Use extended PIDs if enabled.
* Don't clear the ERATs on context sync events and enable I & D LRU.
* Enable ERAT back invalidate when tlbwe overwrites an entry.
*/
#define INITIAL_MMUCR1 \
(MMUCR1_EXTEND_PID | MMUCR1_CSINV_NEVER | MMUCR1_IRRE | \
MMUCR1_DRRE | MMUCR1_TLBWE_BINV)
_GLOBAL(__setup_cpu_a2)
/* Some of these are actually thread local and some are
* core local but doing it always won't hurt
*/
#ifdef CONFIG_PPC_ICSWX
/* Make sure ACOP starts out as zero */
li r3,0
mtspr SPRN_ACOP,r3
/* Skip the following if we are in Guest mode */
mfmsr r3
andis. r0,r3,MSR_GS@h
bne _icswx_skip_guest
/* Enable icswx instruction */
mfspr r3,SPRN_A2_CCR2
ori r3,r3,A2_CCR2_ENABLE_ICSWX
mtspr SPRN_A2_CCR2,r3
/* Unmask all CTs in HACOP */
li r3,-1
mtspr SPRN_HACOP,r3
_icswx_skip_guest:
#endif /* CONFIG_PPC_ICSWX */
/* Enable doorbell */
mfspr r3,SPRN_A2_CCR2
oris r3,r3,A2_CCR2_ENABLE_PC@h
mtspr SPRN_A2_CCR2,r3
isync
/* Setup CCR0 to disable power saving for now as it's busted
* in the current implementations. Setup CCR1 to wake on
* interrupts normally (we write the default value but who
* knows what FW may have clobbered...)
*/
li r3,0
mtspr SPRN_A2_CCR0, r3
LOAD_REG_IMMEDIATE(r3,0x0f0f0f0f)
mtspr SPRN_A2_CCR1, r3
/* Initialise MMUCR1 */
lis r3,INITIAL_MMUCR1@h
ori r3,r3,INITIAL_MMUCR1@l
mtspr SPRN_MMUCR1,r3
/* Set MMUCR2 to enable 4K, 64K, 1M, 16M and 1G pages */
LOAD_REG_IMMEDIATE(r3, 0x000a7531)
mtspr SPRN_MMUCR2,r3
/* Set MMUCR3 to write all thids bit to the TLB */
LOAD_REG_IMMEDIATE(r3, 0x0000000f)
mtspr SPRN_MMUCR3,r3
/* Don't do ERAT stuff if running guest mode */
mfmsr r3
andis. r0,r3,MSR_GS@h
bne 1f
/* Now set the I-ERAT watermark to 15 */
lis r4,(MMUCR0_TLBSEL_I|MMUCR0_ECL)@h
mtspr SPRN_MMUCR0, r4
li r4,A2_IERAT_SIZE-1
PPC_ERATWE(R4,R4,3)
/* Now set the D-ERAT watermark to 31 */
lis r4,(MMUCR0_TLBSEL_D|MMUCR0_ECL)@h
mtspr SPRN_MMUCR0, r4
li r4,A2_DERAT_SIZE-1
PPC_ERATWE(R4,R4,3)
/* And invalidate the beast just in case. That won't get rid of
* a bolted entry though it will be in LRU and so will go away eventually
* but let's not bother for now
*/
PPC_ERATILX(0,0,R0)
1:
blr
_GLOBAL(__restore_cpu_a2)
b __setup_cpu_a2
@@ -56,6 +56,7 @@ _GLOBAL(__setup_cpu_power8)
	li	r0,0
	mtspr	SPRN_LPID,r0
	mfspr	r3,SPRN_LPCR
+	ori	r3, r3, LPCR_PECEDH
	bl	__init_LPCR
	bl	__init_HFSCR
	bl	__init_tlb_power8
@@ -74,6 +75,7 @@ _GLOBAL(__restore_cpu_power8)
	li	r0,0
	mtspr	SPRN_LPID,r0
	mfspr	r3,SPRN_LPCR
+	ori	r3, r3, LPCR_PECEDH
	bl	__init_LPCR
	bl	__init_HFSCR
	bl	__init_tlb_power8
...
@@ -109,7 +109,8 @@ extern void __restore_cpu_e6500(void);
				 PPC_FEATURE_PSERIES_PERFMON_COMPAT)
 #define COMMON_USER2_POWER8	(PPC_FEATURE2_ARCH_2_07 | \
				 PPC_FEATURE2_HTM_COMP | PPC_FEATURE2_DSCR | \
-				 PPC_FEATURE2_ISEL | PPC_FEATURE2_TAR)
+				 PPC_FEATURE2_ISEL | PPC_FEATURE2_TAR | \
+				 PPC_FEATURE2_VEC_CRYPTO)
 #define COMMON_USER_PA6T	(COMMON_USER_PPC64 | PPC_FEATURE_PA6T |\
				 PPC_FEATURE_TRUE_LE | \
				 PPC_FEATURE_HAS_ALTIVEC_COMP)
@@ -2148,44 +2149,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
	}
 #endif /* CONFIG_PPC32 */
 #endif /* CONFIG_E500 */
-
-#ifdef CONFIG_PPC_A2
-	{	/* Standard A2 (>= DD2) + FPU core */
-		.pvr_mask		= 0xffff0000,
-		.pvr_value		= 0x00480000,
-		.cpu_name		= "A2 (>= DD2)",
-		.cpu_features		= CPU_FTRS_A2,
-		.cpu_user_features	= COMMON_USER_PPC64,
-		.mmu_features		= MMU_FTRS_A2,
-		.icache_bsize		= 64,
-		.dcache_bsize		= 64,
-		.num_pmcs		= 0,
-		.cpu_setup		= __setup_cpu_a2,
-		.cpu_restore		= __restore_cpu_a2,
-		.machine_check		= machine_check_generic,
-		.platform		= "ppca2",
-	},
-	{	/* This is a default entry to get going, to be replaced by
-		 * a real one at some stage
-		 */
-#define CPU_FTRS_BASE_BOOK3E	(CPU_FTR_USE_TB | \
-	CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_SMT | \
-	CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE)
-		.pvr_mask		= 0x00000000,
-		.pvr_value		= 0x00000000,
-		.cpu_name		= "Book3E",
-		.cpu_features		= CPU_FTRS_BASE_BOOK3E,
-		.cpu_user_features	= COMMON_USER_PPC64,
-		.mmu_features		= MMU_FTR_TYPE_3E | MMU_FTR_USE_TLBILX |
-					  MMU_FTR_USE_TLBIVAX_BCAST |
-					  MMU_FTR_LOCK_BCAST_INVAL,
-		.icache_bsize		= 64,
-		.dcache_bsize		= 64,
-		.num_pmcs		= 0,
-		.machine_check		= machine_check_generic,
-		.platform		= "power6",
-	},
-#endif /* CONFIG_PPC_A2 */
 };

 static struct cpu_spec the_cpu_spec;
...
@@ -330,8 +330,8 @@ static int eeh_phb_check_failure(struct eeh_pe *pe)
	eeh_pe_state_mark(phb_pe, EEH_PE_ISOLATED);
	eeh_serialize_unlock(flags);

-	pr_err("EEH: PHB#%x failure detected\n",
-	       phb_pe->phb->global_number);
+	pr_err("EEH: PHB#%x failure detected, location: %s\n",
+	       phb_pe->phb->global_number, eeh_pe_loc_get(phb_pe));
	dump_stack();
	eeh_send_failure_event(phb_pe);
@@ -358,10 +358,11 @@ static int eeh_phb_check_failure(struct eeh_pe *pe)
 int eeh_dev_check_failure(struct eeh_dev *edev)
 {
	int ret;
+	int active_flags = (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE);
	unsigned long flags;
	struct device_node *dn;
	struct pci_dev *dev;
-	struct eeh_pe *pe;
+	struct eeh_pe *pe, *parent_pe, *phb_pe;
	int rc = 0;
	const char *location;
@@ -439,14 +440,34 @@ int eeh_dev_check_failure(struct eeh_dev *edev)
	 */
	if ((ret < 0) ||
	    (ret == EEH_STATE_NOT_SUPPORT) ||
-	    (ret & (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE)) ==
-	    (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE)) {
+	    ((ret & active_flags) == active_flags)) {
		eeh_stats.false_positives++;
		pe->false_positives++;
		rc = 0;
		goto dn_unlock;
	}

+	/*
+	 * It should be a corner case that the parent PE has been
+	 * put into frozen state as well, but we should take care
+	 * of that first.
+	 */
+	parent_pe = pe->parent;
+	while (parent_pe) {
+		/* Hit the ceiling ? */
+		if (parent_pe->type & EEH_PE_PHB)
+			break;
+
+		/* Frozen parent PE ? */
+		ret = eeh_ops->get_state(parent_pe, NULL);
+		if (ret > 0 &&
+		    (ret & active_flags) != active_flags)
+			pe = parent_pe;
+
+		/* Next parent level */
+		parent_pe = parent_pe->parent;
+	}
+
	eeh_stats.slot_resets++;

	/* Avoid repeated reports of this failure, including problems
@@ -460,8 +481,11 @@ int eeh_dev_check_failure(struct eeh_dev *edev)
	 * a stack trace will help the device-driver authors figure
	 * out what happened.  So print that out.
	 */
-	pr_err("EEH: Frozen PE#%x detected on PHB#%x\n",
-		pe->addr, pe->phb->global_number);
+	phb_pe = eeh_phb_pe_get(pe->phb);
+	pr_err("EEH: Frozen PHB#%x-PE#%x detected\n",
+	       pe->phb->global_number, pe->addr);
+	pr_err("EEH: PE location: %s, PHB location: %s\n",
+	       eeh_pe_loc_get(pe), eeh_pe_loc_get(phb_pe));
	dump_stack();

	eeh_send_failure_event(pe);
...
@@ -447,8 +447,9 @@ static void *eeh_pe_detach_dev(void *data, void *userdata)
  * PE reset (for 3 times), we try to clear the frozen state
  * for 3 times as well.
  */
-static int eeh_clear_pe_frozen_state(struct eeh_pe *pe)
+static void *__eeh_clear_pe_frozen_state(void *data, void *flag)
 {
+	struct eeh_pe *pe = (struct eeh_pe *)data;
	int i, rc;

	for (i = 0; i < 3; i++) {
@@ -461,13 +462,24 @@ static int eeh_clear_pe_frozen_state(struct eeh_pe *pe)
	}

	/* The PE has been isolated, clear it */
-	if (rc)
+	if (rc) {
		pr_warn("%s: Can't clear frozen PHB#%x-PE#%x (%d)\n",
			__func__, pe->phb->global_number, pe->addr, rc);
-	else
-		eeh_pe_state_clear(pe, EEH_PE_ISOLATED);
+		return (void *)pe;
+	}

-	return rc;
+	return NULL;
+}
+
+static int eeh_clear_pe_frozen_state(struct eeh_pe *pe)
+{
+	void *rc;
+
+	rc = eeh_pe_traverse(pe, __eeh_clear_pe_frozen_state, NULL);
+	if (!rc)
+		eeh_pe_state_clear(pe, EEH_PE_ISOLATED);
+
+	return rc ? -EIO : 0;
 }

 /**
@@ -758,7 +770,7 @@ static void eeh_handle_special_event(void)
		eeh_serialize_lock(&flags);

		/* Purge all events */
-		eeh_remove_event(NULL);
+		eeh_remove_event(NULL, true);

		list_for_each_entry(hose, &hose_list, list_node) {
			phb_pe = eeh_phb_pe_get(hose);
@@ -777,7 +789,7 @@ static void eeh_handle_special_event(void)
			eeh_serialize_lock(&flags);

			/* Purge all events of the PHB */
-			eeh_remove_event(pe);
+			eeh_remove_event(pe, true);

			if (rc == EEH_NEXT_ERR_DEAD_PHB)
				eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
...
@@ -152,24 +152,33 @@ int eeh_send_failure_event(struct eeh_pe *pe)
 /**
  * eeh_remove_event - Remove EEH event from the queue
  * @pe: Event binding to the PE
+ * @force: Event will be removed unconditionally
  *
  * On the PowerNV platform, subsequent events may be part of an
  * earlier one. In that case the later events are duplicates and
  * unnecessary, so they should be removed.
  */
-void eeh_remove_event(struct eeh_pe *pe)
+void eeh_remove_event(struct eeh_pe *pe, bool force)
 {
	unsigned long flags;
	struct eeh_event *event, *tmp;

+	/*
+	 * If a NULL PE is passed in, we have a dead IOC or the
+	 * caller is sure it can report all existing errors itself.
+	 *
+	 * Unless "force" is set, events whose PE has already been
+	 * isolated are kept on the queue so that they are not lost.
+	 */
	spin_lock_irqsave(&eeh_eventlist_lock, flags);
	list_for_each_entry_safe(event, tmp, &eeh_eventlist, list) {
-		/*
-		 * If we don't have valid PE passed in, that means
-		 * we already have event corresponding to dead IOC
-		 * and all events should be purged.
-		 */
+		if (!force && event->pe &&
+		    (event->pe->state & EEH_PE_ISOLATED))
+			continue;
+
		if (!pe) {
			list_del(&event->list);
			kfree(event);
...
@@ -791,6 +791,66 @@ void eeh_pe_restore_bars(struct eeh_pe *pe)
	eeh_pe_dev_traverse(pe, eeh_restore_one_device_bars, NULL);
 }

+/**
+ * eeh_pe_loc_get - Retrieve location code binding to the given PE
+ * @pe: EEH PE
+ *
+ * Retrieve the location code of the given PE. If the primary PE bus
+ * is the root bus, we grab the location code from the PHB device
+ * tree node or root port. Otherwise, the device tree node of the
+ * upstream bridge on the primary PE bus is checked for the location
+ * code.
+ */
+const char *eeh_pe_loc_get(struct eeh_pe *pe)
+{
+	struct pci_controller *hose;
+	struct pci_bus *bus = eeh_pe_bus_get(pe);
+	struct pci_dev *pdev;
+	struct device_node *dn;
+	const char *loc;
+
+	if (!bus)
+		return "N/A";
+
+	/* PHB PE or root PE ? */
+	if (pci_is_root_bus(bus)) {
+		hose = pci_bus_to_host(bus);
+		loc = of_get_property(hose->dn,
+				"ibm,loc-code", NULL);
+		if (loc)
+			return loc;
+		loc = of_get_property(hose->dn,
+				"ibm,io-base-loc-code", NULL);
+		if (loc)
+			return loc;
+
+		pdev = pci_get_slot(bus, 0x0);
+	} else {
+		pdev = bus->self;
+	}
+
+	if (!pdev) {
+		loc = "N/A";
+		goto out;
+	}
+
+	dn = pci_device_to_OF_node(pdev);
+	if (!dn) {
+		loc = "N/A";
+		goto out;
+	}
+
+	loc = of_get_property(dn, "ibm,loc-code", NULL);
+	if (!loc)
+		loc = of_get_property(dn, "ibm,slot-location-code", NULL);
+	if (!loc)
+		loc = "N/A";
+
+out:
+	if (pci_is_root_bus(bus) && pdev)
+		pci_dev_put(pdev);
+	return loc;
+}
+
 /**
  * eeh_pe_bus_get - Retrieve PCI bus according to the given PE
  * @pe: EEH PE
...
@@ -428,12 +428,6 @@ BEGIN_FTR_SECTION
	std	r24,THREAD_VRSAVE(r3)
 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 #endif /* CONFIG_ALTIVEC */
-#ifdef CONFIG_PPC64
-BEGIN_FTR_SECTION
-	mfspr	r25,SPRN_DSCR
-	std	r25,THREAD_DSCR(r3)
-END_FTR_SECTION_IFSET(CPU_FTR_DSCR)
-#endif
	and.	r0,r0,r22
	beq+	1f
	andc	r22,r22,r0
...
@@ -1467,22 +1467,6 @@ a2_tlbinit_after_linear_map:
	.globl  a2_tlbinit_after_iprot_flush
 a2_tlbinit_after_iprot_flush:

-#ifdef CONFIG_PPC_EARLY_DEBUG_WSP
-	/* Now establish early debug mappings if applicable */
-	/* Restore the MAS0 we used for linear mapping load */
-	mtspr	SPRN_MAS0,r11
-
-	lis	r3,(MAS1_VALID | MAS1_IPROT)@h
-	ori	r3,r3,(BOOK3E_PAGESZ_4K << MAS1_TSIZE_SHIFT)
-	mtspr	SPRN_MAS1,r3
-	LOAD_REG_IMMEDIATE(r3, WSP_UART_VIRT | MAS2_I | MAS2_G)
-	mtspr	SPRN_MAS2,r3
-	LOAD_REG_IMMEDIATE(r3, WSP_UART_PHYS | MAS3_SR | MAS3_SW)
-	mtspr	SPRN_MAS7_MAS3,r3
-	/* re-use the MAS8 value from the linear mapping */
-	tlbwe
-#endif /* CONFIG_PPC_EARLY_DEBUG_WSP */
-
	PPC_TLBILX(0,0,R0)
	sync
	isync
...
@@ -439,9 +439,9 @@ BEGIN_FTR_SECTION
	 * R9		= CR
	 * Original R9 to R13 is saved on PACA_EXMC
	 *
-	 * Switch to mc_emergency stack and handle re-entrancy (though we
-	 * currently don't test for overflow). Save MCE registers srr1,
-	 * srr0, dar and dsisr and then set ME=1
+	 * Switch to mc_emergency stack and handle re-entrancy (we limit
+	 * nested MCEs to level 4 to avoid stack overflow).
+	 * Save MCE registers srr1, srr0, dar and dsisr and then set ME=1
	 *
	 * We use paca->in_mce to check whether this is the first entry or
	 * nested machine check. We increment paca->in_mce to track nested
@@ -464,6 +464,9 @@ BEGIN_FTR_SECTION
 0:	subi	r1,r1,INT_FRAME_SIZE	/* alloc stack frame		*/
	addi	r10,r10,1		/* increment paca->in_mce	*/
	sth	r10,PACA_IN_MCE(r13)
+	/* Limit nested MCE to level 4 to avoid stack overflow */
+	cmpwi	r10,4
+	bgt	2f			/* Check if we hit limit of 4 */
	std	r11,GPR1(r1)		/* Save r1 on the stack.	*/
	std	r11,0(r1)		/* make stack chain pointer	*/
	mfspr	r11,SPRN_SRR0		/* Save SRR0 */
@@ -482,10 +485,23 @@ BEGIN_FTR_SECTION
	ori	r11,r11,MSR_RI		/* turn on RI bit */
	ld	r12,PACAKBASE(r13)	/* get high part of &label */
	LOAD_HANDLER(r12, machine_check_handle_early)
-	mtspr	SPRN_SRR0,r12
+1:	mtspr	SPRN_SRR0,r12
	mtspr	SPRN_SRR1,r11
	rfid
	b	.	/* prevent speculative execution */
+2:
+	/* Stack overflow. Stay on the emergency stack and panic.
+	 * Keep the ME bit off while panicking, so that if we hit
+	 * another machine check we checkstop.
+	 */
+	addi	r1,r1,INT_FRAME_SIZE	/* go back to previous stack frame */
+	ld	r11,PACAKMSR(r13)
+	ld	r12,PACAKBASE(r13)
+	LOAD_HANDLER(r12, unrecover_mce)
+	li	r10,MSR_ME
+	andc	r11,r11,r10		/* Turn off MSR_ME */
+	b	1b
+	b	.	/* prevent speculative execution */
 END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)

 machine_check_pSeries:
@@ -1389,6 +1405,7 @@ machine_check_handle_early:
	bl	save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	machine_check_early
+	std	r3,RESULT(r1)	/* Save result */
	ld	r12,_MSR(r1)
 #ifdef	CONFIG_PPC_P7_NAP
	/*
@@ -1443,10 +1460,32 @@ machine_check_handle_early:
	 */
	andi.	r11,r12,MSR_RI
	bne	2f
-1:	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	unrecoverable_exception
-	b	1b
+1:	mfspr	r11,SPRN_SRR0
+	ld	r10,PACAKBASE(r13)
+	LOAD_HANDLER(r10,unrecover_mce)
+	mtspr	SPRN_SRR0,r10
+	ld	r10,PACAKMSR(r13)
+	/*
+	 * We are going down. But there is a chance that we get hit by
+	 * another MCE during the panic path and run into an unstable
+	 * state with no way out. Hence, turn the ME bit off while going
+	 * down, so that when another MCE is hit during the panic path,
+	 * the system will checkstop and the hypervisor will get
+	 * restarted cleanly by the SP.
+	 */
+	li	r3,MSR_ME
+	andc	r10,r10,r3		/* Turn off MSR_ME */
+	mtspr	SPRN_SRR1,r10
+	rfid
+	b	.
 2:
+	/*
+	 * Check if we have successfully handled/recovered from the
+	 * error; if not, stay on the emergency stack and panic.
+	 */
+	ld	r3,RESULT(r1)	/* Load result */
+	cmpdi	r3,0		/* see if we handled MCE successfully */
+	beq	1b		/* if !handled then panic */
	/*
	 * Return from MC interrupt.
	 * Queue up the MCE event so that we can log it later, while
@@ -1460,6 +1499,17 @@ machine_check_handle_early:
	MACHINE_CHECK_HANDLER_WINDUP
	b	machine_check_pSeries

+unrecover_mce:
+	/* Invoke machine_check_exception to print the MCE event and panic. */
+	addi	r3,r1,STACK_FRAME_OVERHEAD
+	bl	machine_check_exception
+	/*
+	 * We will not reach here. Even if we did, there is no way out.
+	 * Call unrecoverable_exception and die.
+	 */
+1:	addi	r3,r1,STACK_FRAME_OVERHEAD
+	bl	unrecoverable_exception
+	b	1b
 /*
  * r13 points to the PACA, r9 contains the saved CR,
  * r12 contain the saved SRR1, SRR0 is still ready for return
...
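The nesting guard added above is easier to follow in C. Below is a toy model of the paca->in_mce accounting — purely illustrative and runnable in userspace; the real logic is the assembly in the hunk above:

#include <stdio.h>

#define MAX_MCE_DEPTH	4	/* the "cmpwi r10,4" limit above */

static int in_mce;		/* stands in for paca->in_mce */

static void machine_check(int pending)
{
	if (++in_mce > MAX_MCE_DEPTH) {
		/* Stay on the emergency stack and panic with MSR_ME
		 * clear, so that one more MCE checkstops the machine
		 * instead of recursing forever. */
		printf("depth %d: unrecover_mce\n", in_mce);
		in_mce--;
		return;
	}
	printf("depth %d: handle on mc_emergency stack\n", in_mce);
	if (pending)
		machine_check(pending - 1);	/* simulate a nested MCE */
	in_mce--;
}

int main(void)
{
	machine_check(5);	/* deliver five nested machine checks */
	return 0;
}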
@@ -930,25 +930,6 @@ initial_mmu:
	tlbwe	r4,r0,TLB_DATA		/* Load the data portion of the entry */
	tlbwe	r3,r0,TLB_TAG		/* Load the tag portion of the entry */

-#if defined(CONFIG_SERIAL_TEXT_DEBUG) && defined(SERIAL_DEBUG_IO_BASE)
-
-	/* Load a TLB entry for the UART, so that ppc4xx_progress() can use
-	 * the UARTs nice and early.  We use a 4k real==virtual mapping. */
-
-	lis	r3,SERIAL_DEBUG_IO_BASE@h
-	ori	r3,r3,SERIAL_DEBUG_IO_BASE@l
-	mr	r4,r3
-	clrrwi	r4,r4,12
-	ori	r4,r4,(TLB_WR|TLB_I|TLB_M|TLB_G)
-
-	clrrwi	r3,r3,12
-	ori	r3,r3,(TLB_VALID | TLB_PAGESZ(PAGESZ_4K))
-
-	li	r0,0			/* TLB slot 0 */
-	tlbwe	r4,r0,TLB_DATA
-	tlbwe	r3,r0,TLB_TAG
-#endif /* CONFIG_SERIAL_DEBUG_TEXT && SERIAL_DEBUG_IO_BASE */
-
	isync

 /* Establish the exception vector base
...
@@ -755,15 +755,15 @@ struct task_struct *__switch_to(struct task_struct *prev,

	WARN_ON(!irqs_disabled());

-	/* Back up the TAR across context switches.
+	/* Back up the TAR and DSCR across context switches.
	 * Note that the TAR is not available for use in the kernel.  (To
	 * provide this, the TAR should be backed up/restored on exception
	 * entry/exit instead, and be in pt_regs.  FIXME, this should be in
	 * pt_regs anyway (for debug).)
-	 * Save the TAR here before we do treclaim/trecheckpoint as these
-	 * will change the TAR.
+	 * Save the TAR and DSCR here before we do treclaim/trecheckpoint
+	 * as these will change them.
	 */
-	save_tar(&prev->thread);
+	save_early_sprs(&prev->thread);

	__switch_to_tm(prev);
...
@@ -471,7 +471,7 @@ void __init smp_setup_cpu_maps(void)
		for (j = 0; j < nthreads && cpu < nr_cpu_ids; j++) {
			DBG("    thread %d -> cpu %d (hard id %d)\n",
			    j, cpu, be32_to_cpu(intserv[j]));
-			set_cpu_present(cpu, true);
+			set_cpu_present(cpu, of_device_is_available(dn));
			set_hard_smp_processor_id(cpu, be32_to_cpu(intserv[j]));
			set_cpu_possible(cpu, true);
			cpu++;
...
@@ -551,7 +551,7 @@ void timer_interrupt(struct pt_regs * regs)
	may_hard_irq_enable();

-#if defined(CONFIG_PPC32) && defined(CONFIG_PMAC)
+#if defined(CONFIG_PPC32) && defined(CONFIG_PPC_PMAC)
	if (atomic_read(&ppc_n_lost_interrupts) != 0)
		do_IRQ(regs);
 #endif
...
@@ -295,6 +295,8 @@ long machine_check_early(struct pt_regs *regs)
 {
	long handled = 0;

+	__get_cpu_var(irq_stat).mce_exceptions++;
+
	if (cur_cpu_spec && cur_cpu_spec->machine_check_early)
		handled = cur_cpu_spec->machine_check_early(regs);
	return handled;
...
@@ -62,8 +62,6 @@ void __init udbg_early_init(void)
	udbg_init_cpm();
 #elif defined(CONFIG_PPC_EARLY_DEBUG_USBGECKO)
	udbg_init_usbgecko();
-#elif defined(CONFIG_PPC_EARLY_DEBUG_WSP)
-	udbg_init_wsp();
 #elif defined(CONFIG_PPC_EARLY_DEBUG_MEMCONS)
	/* In memory console */
	udbg_init_memcons();
...
@@ -296,14 +296,3 @@ void __init udbg_init_40x_realmode(void)
 }

 #endif /* CONFIG_PPC_EARLY_DEBUG_40x */
-
-#ifdef CONFIG_PPC_EARLY_DEBUG_WSP
-void __init udbg_init_wsp(void)
-{
-	udbg_uart_init_mmio((void *)WSP_UART_VIRT, 1);
-	udbg_uart_setup(57600, 50000000);
-}
-#endif /* CONFIG_PPC_EARLY_DEBUG_WSP */
@@ -113,10 +113,8 @@ static long kvmppc_realmode_mc_power7(struct kvm_vcpu *vcpu)
	 * We assume that if the condition is recovered then linux host
	 * will have generated an error log event that we will pick
	 * up and log later.
-	 * Don't release mce event now. In case if condition is not
-	 * recovered we do guest exit and go back to linux host machine
-	 * check handler. Hence we need make sure that current mce event
-	 * is available for linux host to consume.
+	 * Don't release the mce event now. We will queue up the event
+	 * so that we can log the MCE event info on the host console.
	 */
	if (!get_mce_event(&mce_evt, MCE_EVENT_DONTRELEASE))
		goto out;
@@ -128,11 +126,12 @@ static long kvmppc_realmode_mc_power7(struct kvm_vcpu *vcpu)

 out:
	/*
-	 * If we have handled the error, then release the mce event because
-	 * we will be delivering machine check to guest.
+	 * We are now going to enter the guest, either through a machine
+	 * check interrupt (for unhandled errors) or continuing from the
+	 * current HSRR0 (for handled errors). Either way, queue up the
+	 * event so that we can log it from the host console later.
	 */
-	if (handled)
-		release_mce_event();
+	machine_check_queue_event();

	return handled;
 }
...
@@ -2257,15 +2257,28 @@ machine_check_realmode:
	mr	r3, r9		/* get vcpu pointer */
	bl	kvmppc_realmode_machine_check
	nop
-	cmpdi	r3, 0		/* continue exiting from guest? */
+	cmpdi	r3, 0		/* Did we handle MCE ? */
	ld	r9, HSTATE_KVM_VCPU(r13)
	li	r12, BOOK3S_INTERRUPT_MACHINE_CHECK
-	beq	mc_cont
+	/*
+	 * Deliver unhandled/fatal (e.g. UE) MCE errors to the guest
+	 * through a machine check interrupt (set HSRR0 to 0x200). For
+	 * handled (non-fatal) errors, just go back to guest execution
+	 * with the current HSRR0 instead of exiting the guest. This new
+	 * approach injects the machine check into the guest for fatal
+	 * errors, causing the guest to crash.
+	 *
+	 * The old code used to return to the host for unhandled errors,
+	 * which caused the guest to hang with soft lockups and made it
+	 * difficult to recover the guest instance.
+	 */
+	ld	r10, VCPU_PC(r9)
+	ld	r11, VCPU_MSR(r9)
+	bne	2f	/* Continue guest execution. */
	/* If not, deliver a machine check.  SRR0/1 are already set */
	li	r10, BOOK3S_INTERRUPT_MACHINE_CHECK
-	ld	r11, VCPU_MSR(r9)
	bl	kvmppc_msr_interrupt
-	b	fast_interrupt_c_return
+2:	b	fast_interrupt_c_return

 /*
  * Check the reason we woke from nap, and take appropriate action.
...
@@ -1470,7 +1470,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
			regs->gpr[rd] = byterev_4(val);
		goto ldst_done;

-#ifdef CONFIG_PPC_CPU
+#ifdef CONFIG_PPC_FPU
		case 535:	/* lfsx */
		case 567:	/* lfsux */
			if (!(regs->msr & MSR_FP))
...
@@ -19,7 +19,6 @@ source "arch/powerpc/platforms/embedded6xx/Kconfig"
 source "arch/powerpc/platforms/44x/Kconfig"
 source "arch/powerpc/platforms/40x/Kconfig"
 source "arch/powerpc/platforms/amigaone/Kconfig"
-source "arch/powerpc/platforms/wsp/Kconfig"

 config KVM_GUEST
	bool "KVM Guest support"
...
@@ -148,10 +148,6 @@ config POWER4
	depends on PPC64 && PPC_BOOK3S
	def_bool y

-config PPC_A2
-	bool
-	depends on PPC_BOOK3E_64
-
 config TUNE_CELL
	bool "Optimize for Cell Broadband Engine"
	depends on PPC64 && PPC_BOOK3S
@@ -280,7 +276,7 @@ config VSX

 config PPC_ICSWX
	bool "Support for PowerPC icswx coprocessor instruction"
-	depends on POWER4 || PPC_A2
+	depends on POWER4
	default n
	---help---
...
@@ -22,4 +22,3 @@ obj-$(CONFIG_PPC_CELL)		+= cell/
 obj-$(CONFIG_PPC_PS3)		+= ps3/
 obj-$(CONFIG_EMBEDDED6xx)	+= embedded6xx/
 obj-$(CONFIG_AMIGAONE)		+= amigaone/
-obj-$(CONFIG_PPC_WSP)		+= wsp/
@@ -35,7 +35,6 @@
 #define SPUFS_PS_MAP_SIZE	0x20000
 #define SPUFS_MFC_MAP_SIZE	0x1000
 #define SPUFS_CNTL_MAP_SIZE	0x1000
-#define SPUFS_CNTL_MAP_SIZE	0x1000
 #define SPUFS_SIGNAL_MAP_SIZE	PAGE_SIZE
 #define SPUFS_MSS_MAP_SIZE	0x1000
...
@@ -17,6 +17,7 @@ config PPC_POWERNV
	select CPU_FREQ_GOV_USERSPACE
	select CPU_FREQ_GOV_ONDEMAND
	select CPU_FREQ_GOV_CONSERVATIVE
+	select PPC_DOORBELL
	default y

 config PPC_POWERNV_RTAS
...
 obj-y			+= setup.o opal-takeover.o opal-wrappers.o opal.o opal-async.o
 obj-y			+= opal-rtc.o opal-nvram.o opal-lpc.o opal-flash.o
 obj-y			+= rng.o opal-elog.o opal-dump.o opal-sysparam.o opal-sensor.o
-obj-y			+= opal-msglog.o subcore.o subcore-asm.o
-obj-$(CONFIG_SMP)	+= smp.o
+obj-y			+= opal-msglog.o
+obj-$(CONFIG_SMP)	+= smp.o subcore.o subcore-asm.o
 obj-$(CONFIG_PCI)	+= pci.o pci-p5ioc2.o pci-ioda.o
 obj-$(CONFIG_EEH)	+= eeh-ioda.o eeh-powernv.o
 obj-$(CONFIG_PPC_SCOM)	+= opal-xscom.o
...
...@@ -267,7 +267,7 @@ static int ioda_eeh_get_state(struct eeh_pe *pe) ...@@ -267,7 +267,7 @@ static int ioda_eeh_get_state(struct eeh_pe *pe)
{ {
s64 ret = 0; s64 ret = 0;
u8 fstate; u8 fstate;
u16 pcierr; __be16 pcierr;
u32 pe_no; u32 pe_no;
int result; int result;
struct pci_controller *hose = pe->phb; struct pci_controller *hose = pe->phb;
...@@ -316,7 +316,7 @@ static int ioda_eeh_get_state(struct eeh_pe *pe) ...@@ -316,7 +316,7 @@ static int ioda_eeh_get_state(struct eeh_pe *pe)
result = 0; result = 0;
result &= ~EEH_STATE_RESET_ACTIVE; result &= ~EEH_STATE_RESET_ACTIVE;
if (pcierr != OPAL_EEH_PHB_ERROR) { if (be16_to_cpu(pcierr) != OPAL_EEH_PHB_ERROR) {
result |= EEH_STATE_MMIO_ACTIVE; result |= EEH_STATE_MMIO_ACTIVE;
result |= EEH_STATE_DMA_ACTIVE; result |= EEH_STATE_DMA_ACTIVE;
result |= EEH_STATE_MMIO_ENABLED; result |= EEH_STATE_MMIO_ENABLED;
...@@ -705,18 +705,19 @@ static int ioda_eeh_next_error(struct eeh_pe **pe) ...@@ -705,18 +705,19 @@ static int ioda_eeh_next_error(struct eeh_pe **pe)
{ {
struct pci_controller *hose; struct pci_controller *hose;
struct pnv_phb *phb; struct pnv_phb *phb;
struct eeh_pe *phb_pe; struct eeh_pe *phb_pe, *parent_pe;
u64 frozen_pe_no; __be64 frozen_pe_no;
u16 err_type, severity; __be16 err_type, severity;
int active_flags = (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE);
long rc; long rc;
int ret = EEH_NEXT_ERR_NONE; int state, ret = EEH_NEXT_ERR_NONE;
/* /*
* While running here, it's safe to purge the event queue. * While running here, it's safe to purge the event queue.
* And we should keep the cached OPAL notifier event sychronized * And we should keep the cached OPAL notifier event sychronized
* between the kernel and firmware. * between the kernel and firmware.
*/ */
eeh_remove_event(NULL); eeh_remove_event(NULL, false);
opal_notifier_update_evt(OPAL_EVENT_PCI_ERROR, 0x0ul); opal_notifier_update_evt(OPAL_EVENT_PCI_ERROR, 0x0ul);
list_for_each_entry(hose, &hose_list, list_node) { list_for_each_entry(hose, &hose_list, list_node) {
...@@ -742,8 +743,8 @@ static int ioda_eeh_next_error(struct eeh_pe **pe) ...@@ -742,8 +743,8 @@ static int ioda_eeh_next_error(struct eeh_pe **pe)
} }
/* If the PHB doesn't have error, stop processing */ /* If the PHB doesn't have error, stop processing */
if (err_type == OPAL_EEH_NO_ERROR || if (be16_to_cpu(err_type) == OPAL_EEH_NO_ERROR ||
severity == OPAL_EEH_SEV_NO_ERROR) { be16_to_cpu(severity) == OPAL_EEH_SEV_NO_ERROR) {
pr_devel("%s: No error found on PHB#%x\n", pr_devel("%s: No error found on PHB#%x\n",
__func__, hose->global_number); __func__, hose->global_number);
continue; continue;
...@@ -755,14 +756,14 @@ static int ioda_eeh_next_error(struct eeh_pe **pe) ...@@ -755,14 +756,14 @@ static int ioda_eeh_next_error(struct eeh_pe **pe)
* specific PHB. * specific PHB.
*/ */
pr_devel("%s: Error (%d, %d, %llu) on PHB#%x\n", pr_devel("%s: Error (%d, %d, %llu) on PHB#%x\n",
__func__, err_type, severity, __func__, be16_to_cpu(err_type), be16_to_cpu(severity),
frozen_pe_no, hose->global_number); be64_to_cpu(frozen_pe_no), hose->global_number);
switch (err_type) { switch (be16_to_cpu(err_type)) {
case OPAL_EEH_IOC_ERROR: case OPAL_EEH_IOC_ERROR:
if (severity == OPAL_EEH_SEV_IOC_DEAD) { if (be16_to_cpu(severity) == OPAL_EEH_SEV_IOC_DEAD) {
pr_err("EEH: dead IOC detected\n"); pr_err("EEH: dead IOC detected\n");
ret = EEH_NEXT_ERR_DEAD_IOC; ret = EEH_NEXT_ERR_DEAD_IOC;
} else if (severity == OPAL_EEH_SEV_INF) { } else if (be16_to_cpu(severity) == OPAL_EEH_SEV_INF) {
pr_info("EEH: IOC informative error " pr_info("EEH: IOC informative error "
"detected\n"); "detected\n");
ioda_eeh_hub_diag(hose); ioda_eeh_hub_diag(hose);
...@@ -771,20 +772,26 @@ static int ioda_eeh_next_error(struct eeh_pe **pe) ...@@ -771,20 +772,26 @@ static int ioda_eeh_next_error(struct eeh_pe **pe)
break; break;
case OPAL_EEH_PHB_ERROR: case OPAL_EEH_PHB_ERROR:
if (severity == OPAL_EEH_SEV_PHB_DEAD) { if (be16_to_cpu(severity) == OPAL_EEH_SEV_PHB_DEAD) {
*pe = phb_pe; *pe = phb_pe;
pr_err("EEH: dead PHB#%x detected\n", pr_err("EEH: dead PHB#%x detected, "
hose->global_number); "location: %s\n",
hose->global_number,
eeh_pe_loc_get(phb_pe));
ret = EEH_NEXT_ERR_DEAD_PHB; ret = EEH_NEXT_ERR_DEAD_PHB;
} else if (severity == OPAL_EEH_SEV_PHB_FENCED) { } else if (be16_to_cpu(severity) ==
OPAL_EEH_SEV_PHB_FENCED) {
*pe = phb_pe; *pe = phb_pe;
pr_err("EEH: fenced PHB#%x detected\n", pr_err("EEH: Fenced PHB#%x detected, "
hose->global_number); "location: %s\n",
hose->global_number,
eeh_pe_loc_get(phb_pe));
ret = EEH_NEXT_ERR_FENCED_PHB; ret = EEH_NEXT_ERR_FENCED_PHB;
} else if (severity == OPAL_EEH_SEV_INF) { } else if (be16_to_cpu(severity) == OPAL_EEH_SEV_INF) {
pr_info("EEH: PHB#%x informative error " pr_info("EEH: PHB#%x informative error "
"detected\n", "detected, location: %s\n",
hose->global_number); hose->global_number,
eeh_pe_loc_get(phb_pe));
ioda_eeh_phb_diag(hose); ioda_eeh_phb_diag(hose);
ret = EEH_NEXT_ERR_NONE; ret = EEH_NEXT_ERR_NONE;
} }
...@@ -792,34 +799,33 @@ static int ioda_eeh_next_error(struct eeh_pe **pe) ...@@ -792,34 +799,33 @@ static int ioda_eeh_next_error(struct eeh_pe **pe)
break; break;
case OPAL_EEH_PE_ERROR: case OPAL_EEH_PE_ERROR:
/* /*
* If we can't find the corresponding PE, the * If we can't find the corresponding PE, we
* PEEV / PEST would be messy. So we force an * just try to unfreeze.
* fenced PHB so that it can be recovered.
*
* If the PE has been marked as isolated, that
* should have been removed permanently or in
* progress with recovery. We needn't report
* it again.
*/ */
if (ioda_eeh_get_pe(hose, frozen_pe_no, pe)) { if (ioda_eeh_get_pe(hose,
*pe = phb_pe; be64_to_cpu(frozen_pe_no), pe)) {
pr_err("EEH: Escalated fenced PHB#%x " /* Try best to clear it */
"detected for PE#%llx\n", pr_info("EEH: Clear non-existing PHB#%x-PE#%llx\n",
hose->global_number, hose->global_number, frozen_pe_no);
frozen_pe_no); pr_info("EEH: PHB location: %s\n",
ret = EEH_NEXT_ERR_FENCED_PHB; eeh_pe_loc_get(phb_pe));
opal_pci_eeh_freeze_clear(phb->opal_id, frozen_pe_no,
OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
ret = EEH_NEXT_ERR_NONE;
} else if ((*pe)->state & EEH_PE_ISOLATED) { } else if ((*pe)->state & EEH_PE_ISOLATED) {
ret = EEH_NEXT_ERR_NONE; ret = EEH_NEXT_ERR_NONE;
} else { } else {
pr_err("EEH: Frozen PE#%x on PHB#%x detected\n", pr_err("EEH: Frozen PE#%x on PHB#%x detected\n",
(*pe)->addr, (*pe)->phb->global_number); (*pe)->addr, (*pe)->phb->global_number);
pr_err("EEH: PE location: %s, PHB location: %s\n",
eeh_pe_loc_get(*pe), eeh_pe_loc_get(phb_pe));
ret = EEH_NEXT_ERR_FROZEN_PE; ret = EEH_NEXT_ERR_FROZEN_PE;
} }
break; break;
default: default:
pr_warn("%s: Unexpected error type %d\n", pr_warn("%s: Unexpected error type %d\n",
__func__, err_type); __func__, be16_to_cpu(err_type));
} }
/* /*
...@@ -836,6 +842,31 @@ static int ioda_eeh_next_error(struct eeh_pe **pe) ...@@ -836,6 +842,31 @@ static int ioda_eeh_next_error(struct eeh_pe **pe)
ioda_eeh_phb_diag(hose); ioda_eeh_phb_diag(hose);
} }
/*
* We probably have a frozen parent PE somewhere and,
* if so, we need to handle that frozen parent PE first.
*/
if (ret == EEH_NEXT_ERR_FROZEN_PE) {
parent_pe = (*pe)->parent;
while (parent_pe) {
/* Hit the ceiling? */
if (parent_pe->type & EEH_PE_PHB)
break;
/* Frozen parent PE? */
state = ioda_eeh_get_state(parent_pe);
if (state > 0 &&
(state & active_flags) != active_flags)
*pe = parent_pe;
/* Next parent level */
parent_pe = parent_pe->parent;
}
/* We possibly migrate to another PE */
eeh_pe_state_mark(*pe, EEH_PE_ISOLATED);
}
/* /*
* If we have no errors on the specific PHB or only * If we have no errors on the specific PHB or only
* informative error there, we continue poking it. * informative error there, we continue poking it.
......
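Aside: the walk added above promotes a reported PE to its outermost frozen ancestor before marking it isolated. The freeze test can be read as a single predicate over the same state bits used in the hunk (a sketch for the reader, not code from the patch):

/*
 * A positive state with either EEH_STATE_MMIO_ACTIVE or
 * EEH_STATE_DMA_ACTIVE missing means the PE has been frozen;
 * a negative state is an error and is skipped by the walk.
 */
static inline bool pe_is_frozen(int state, int active_flags)
{
	return state > 0 && (state & active_flags) != active_flags;
}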
...@@ -37,7 +37,8 @@ static ssize_t opal_msglog_read(struct file *file, struct kobject *kobj, ...@@ -37,7 +37,8 @@ static ssize_t opal_msglog_read(struct file *file, struct kobject *kobj,
{ {
struct memcons *mc = bin_attr->private; struct memcons *mc = bin_attr->private;
const char *conbuf; const char *conbuf;
size_t ret, first_read = 0; ssize_t ret;
size_t first_read = 0;
uint32_t out_pos, avail; uint32_t out_pos, avail;
if (!mc) if (!mc)
...@@ -69,6 +70,9 @@ static ssize_t opal_msglog_read(struct file *file, struct kobject *kobj, ...@@ -69,6 +70,9 @@ static ssize_t opal_msglog_read(struct file *file, struct kobject *kobj,
to += first_read; to += first_read;
count -= first_read; count -= first_read;
pos -= avail; pos -= avail;
if (count <= 0)
goto out;
} }
/* Sanity check. The firmware should not do this to us. */ /* Sanity check. The firmware should not do this to us. */
......
...@@ -260,10 +260,10 @@ void __init opal_sys_param_init(void) ...@@ -260,10 +260,10 @@ void __init opal_sys_param_init(void)
attr[i].kobj_attr.attr.mode = S_IRUGO; attr[i].kobj_attr.attr.mode = S_IRUGO;
break; break;
case OPAL_SYSPARAM_WRITE: case OPAL_SYSPARAM_WRITE:
attr[i].kobj_attr.attr.mode = S_IWUGO; attr[i].kobj_attr.attr.mode = S_IWUSR;
break; break;
case OPAL_SYSPARAM_RW: case OPAL_SYSPARAM_RW:
attr[i].kobj_attr.attr.mode = S_IRUGO | S_IWUGO; attr[i].kobj_attr.attr.mode = S_IRUGO | S_IWUSR;
break; break;
default: default:
break; break;
......
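For reference, the permission macros this hunk narrows (S_IWUGO is kernel shorthand for write permission for user, group and other; this userspace sketch just prints the octal values and is not part of the patch):

#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
	/* S_IWUGO == S_IWUSR | S_IWGRP | S_IWOTH == 0222 (world-writable) */
	unsigned int s_iwugo = S_IWUSR | S_IWGRP | S_IWOTH;

	printf("S_IWUGO = %04o\n", s_iwugo);	/* 0222 */
	printf("S_IWUSR = %04o\n", S_IWUSR);	/* 0200: owner-only write */
	return 0;
}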
...@@ -206,72 +206,91 @@ static void pnv_pci_dump_phb3_diag_data(struct pci_controller *hose, ...@@ -206,72 +206,91 @@ static void pnv_pci_dump_phb3_diag_data(struct pci_controller *hose,
data = (struct OpalIoPhb3ErrorData*)common; data = (struct OpalIoPhb3ErrorData*)common;
pr_info("PHB3 PHB#%d Diag-data (Version: %d)\n", pr_info("PHB3 PHB#%d Diag-data (Version: %d)\n",
hose->global_number, common->version); hose->global_number, be32_to_cpu(common->version));
if (data->brdgCtl) if (data->brdgCtl)
pr_info("brdgCtl: %08x\n", pr_info("brdgCtl: %08x\n",
data->brdgCtl); be32_to_cpu(data->brdgCtl));
if (data->portStatusReg || data->rootCmplxStatus || if (data->portStatusReg || data->rootCmplxStatus ||
data->busAgentStatus) data->busAgentStatus)
pr_info("UtlSts: %08x %08x %08x\n", pr_info("UtlSts: %08x %08x %08x\n",
data->portStatusReg, data->rootCmplxStatus, be32_to_cpu(data->portStatusReg),
data->busAgentStatus); be32_to_cpu(data->rootCmplxStatus),
be32_to_cpu(data->busAgentStatus));
if (data->deviceStatus || data->slotStatus || if (data->deviceStatus || data->slotStatus ||
data->linkStatus || data->devCmdStatus || data->linkStatus || data->devCmdStatus ||
data->devSecStatus) data->devSecStatus)
pr_info("RootSts: %08x %08x %08x %08x %08x\n", pr_info("RootSts: %08x %08x %08x %08x %08x\n",
data->deviceStatus, data->slotStatus, be32_to_cpu(data->deviceStatus),
data->linkStatus, data->devCmdStatus, be32_to_cpu(data->slotStatus),
data->devSecStatus); be32_to_cpu(data->linkStatus),
be32_to_cpu(data->devCmdStatus),
be32_to_cpu(data->devSecStatus));
if (data->rootErrorStatus || data->uncorrErrorStatus || if (data->rootErrorStatus || data->uncorrErrorStatus ||
data->corrErrorStatus) data->corrErrorStatus)
pr_info("RootErrSts: %08x %08x %08x\n", pr_info("RootErrSts: %08x %08x %08x\n",
data->rootErrorStatus, data->uncorrErrorStatus, be32_to_cpu(data->rootErrorStatus),
data->corrErrorStatus); be32_to_cpu(data->uncorrErrorStatus),
be32_to_cpu(data->corrErrorStatus));
if (data->tlpHdr1 || data->tlpHdr2 || if (data->tlpHdr1 || data->tlpHdr2 ||
data->tlpHdr3 || data->tlpHdr4) data->tlpHdr3 || data->tlpHdr4)
pr_info("RootErrLog: %08x %08x %08x %08x\n", pr_info("RootErrLog: %08x %08x %08x %08x\n",
data->tlpHdr1, data->tlpHdr2, be32_to_cpu(data->tlpHdr1),
data->tlpHdr3, data->tlpHdr4); be32_to_cpu(data->tlpHdr2),
be32_to_cpu(data->tlpHdr3),
be32_to_cpu(data->tlpHdr4));
if (data->sourceId || data->errorClass || if (data->sourceId || data->errorClass ||
data->correlator) data->correlator)
pr_info("RootErrLog1: %08x %016llx %016llx\n", pr_info("RootErrLog1: %08x %016llx %016llx\n",
data->sourceId, data->errorClass, be32_to_cpu(data->sourceId),
data->correlator); be64_to_cpu(data->errorClass),
be64_to_cpu(data->correlator));
if (data->nFir) if (data->nFir)
pr_info("nFir: %016llx %016llx %016llx\n", pr_info("nFir: %016llx %016llx %016llx\n",
data->nFir, data->nFirMask, be64_to_cpu(data->nFir),
data->nFirWOF); be64_to_cpu(data->nFirMask),
be64_to_cpu(data->nFirWOF));
if (data->phbPlssr || data->phbCsr) if (data->phbPlssr || data->phbCsr)
pr_info("PhbSts: %016llx %016llx\n", pr_info("PhbSts: %016llx %016llx\n",
data->phbPlssr, data->phbCsr); be64_to_cpu(data->phbPlssr),
be64_to_cpu(data->phbCsr));
if (data->lemFir) if (data->lemFir)
pr_info("Lem: %016llx %016llx %016llx\n", pr_info("Lem: %016llx %016llx %016llx\n",
data->lemFir, data->lemErrorMask, be64_to_cpu(data->lemFir),
data->lemWOF); be64_to_cpu(data->lemErrorMask),
be64_to_cpu(data->lemWOF));
if (data->phbErrorStatus) if (data->phbErrorStatus)
pr_info("PhbErr: %016llx %016llx %016llx %016llx\n", pr_info("PhbErr: %016llx %016llx %016llx %016llx\n",
data->phbErrorStatus, data->phbFirstErrorStatus, be64_to_cpu(data->phbErrorStatus),
data->phbErrorLog0, data->phbErrorLog1); be64_to_cpu(data->phbFirstErrorStatus),
be64_to_cpu(data->phbErrorLog0),
be64_to_cpu(data->phbErrorLog1));
if (data->mmioErrorStatus) if (data->mmioErrorStatus)
pr_info("OutErr: %016llx %016llx %016llx %016llx\n", pr_info("OutErr: %016llx %016llx %016llx %016llx\n",
data->mmioErrorStatus, data->mmioFirstErrorStatus, be64_to_cpu(data->mmioErrorStatus),
data->mmioErrorLog0, data->mmioErrorLog1); be64_to_cpu(data->mmioFirstErrorStatus),
be64_to_cpu(data->mmioErrorLog0),
be64_to_cpu(data->mmioErrorLog1));
if (data->dma0ErrorStatus) if (data->dma0ErrorStatus)
pr_info("InAErr: %016llx %016llx %016llx %016llx\n", pr_info("InAErr: %016llx %016llx %016llx %016llx\n",
data->dma0ErrorStatus, data->dma0FirstErrorStatus, be64_to_cpu(data->dma0ErrorStatus),
data->dma0ErrorLog0, data->dma0ErrorLog1); be64_to_cpu(data->dma0FirstErrorStatus),
be64_to_cpu(data->dma0ErrorLog0),
be64_to_cpu(data->dma0ErrorLog1));
if (data->dma1ErrorStatus) if (data->dma1ErrorStatus)
pr_info("InBErr: %016llx %016llx %016llx %016llx\n", pr_info("InBErr: %016llx %016llx %016llx %016llx\n",
data->dma1ErrorStatus, data->dma1FirstErrorStatus, be64_to_cpu(data->dma1ErrorStatus),
data->dma1ErrorLog0, data->dma1ErrorLog1); be64_to_cpu(data->dma1FirstErrorStatus),
be64_to_cpu(data->dma1ErrorLog0),
be64_to_cpu(data->dma1ErrorLog1));
for (i = 0; i < OPAL_PHB3_NUM_PEST_REGS; i++) { for (i = 0; i < OPAL_PHB3_NUM_PEST_REGS; i++) {
if ((data->pestA[i] >> 63) == 0 && if ((be64_to_cpu(data->pestA[i]) >> 63) == 0 &&
(data->pestB[i] >> 63) == 0) (be64_to_cpu(data->pestB[i]) >> 63) == 0)
continue; continue;
pr_info("PE[%3d] A/B: %016llx %016llx\n", pr_info("PE[%3d] A/B: %016llx %016llx\n",
i, data->pestA[i], data->pestB[i]); i, be64_to_cpu(data->pestA[i]),
be64_to_cpu(data->pestB[i]));
} }
} }
...@@ -284,7 +303,7 @@ void pnv_pci_dump_phb_diag_data(struct pci_controller *hose, ...@@ -284,7 +303,7 @@ void pnv_pci_dump_phb_diag_data(struct pci_controller *hose,
return; return;
common = (struct OpalIoPhbErrorCommon *)log_buff; common = (struct OpalIoPhbErrorCommon *)log_buff;
switch (common->ioType) { switch (be32_to_cpu(common->ioType)) {
case OPAL_PHB_ERROR_DATA_TYPE_P7IOC: case OPAL_PHB_ERROR_DATA_TYPE_P7IOC:
pnv_pci_dump_p7ioc_diag_data(hose, common); pnv_pci_dump_p7ioc_diag_data(hose, common);
break; break;
...@@ -293,7 +312,7 @@ void pnv_pci_dump_phb_diag_data(struct pci_controller *hose, ...@@ -293,7 +312,7 @@ void pnv_pci_dump_phb_diag_data(struct pci_controller *hose,
break; break;
default: default:
pr_warn("%s: Unrecognized ioType %d\n", pr_warn("%s: Unrecognized ioType %d\n",
__func__, common->ioType); __func__, be32_to_cpu(common->ioType));
} }
} }
......
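The conversion pattern above repeats throughout this series: OPAL returns its data big-endian, so structure fields are typed __be16/__be32/__be64 and swapped at the point of use. A schematic example (the struct and function names are illustrative, not from the patch):

struct opal_example {
	__be32 version;
	__be64 status;
};

static void dump_example(const struct opal_example *d)
{
	/* be*_to_cpu() byteswaps on little-endian hosts and is a
	 * no-op on big-endian ones, so one source works for both */
	pr_info("version %u status %016llx\n",
		be32_to_cpu(d->version),
		(unsigned long long)be64_to_cpu(d->status));
}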
...@@ -35,11 +35,14 @@ ...@@ -35,11 +35,14 @@
#include <asm/rtas.h> #include <asm/rtas.h>
#include <asm/opal.h> #include <asm/opal.h>
#include <asm/kexec.h> #include <asm/kexec.h>
#include <asm/smp.h>
#include "powernv.h" #include "powernv.h"
static void __init pnv_setup_arch(void) static void __init pnv_setup_arch(void)
{ {
set_arch_panic_timeout(10, ARCH_PANIC_TIMEOUT);
/* Initialize SMP */ /* Initialize SMP */
pnv_smp_init(); pnv_smp_init();
......
...@@ -32,6 +32,7 @@ ...@@ -32,6 +32,7 @@
#include <asm/opal.h> #include <asm/opal.h>
#include <asm/runlatch.h> #include <asm/runlatch.h>
#include <asm/code-patching.h> #include <asm/code-patching.h>
#include <asm/dbell.h>
#include "powernv.h" #include "powernv.h"
...@@ -46,6 +47,11 @@ static void pnv_smp_setup_cpu(int cpu) ...@@ -46,6 +47,11 @@ static void pnv_smp_setup_cpu(int cpu)
{ {
if (cpu != boot_cpuid) if (cpu != boot_cpuid)
xics_setup_cpu(); xics_setup_cpu();
#ifdef CONFIG_PPC_DOORBELL
if (cpu_has_feature(CPU_FTR_DBELL))
doorbell_setup_this_cpu();
#endif
} }
int pnv_smp_kick_cpu(int nr) int pnv_smp_kick_cpu(int nr)
......
...@@ -21,6 +21,7 @@ config PPC_PSERIES ...@@ -21,6 +21,7 @@ config PPC_PSERIES
select HAVE_CONTEXT_TRACKING select HAVE_CONTEXT_TRACKING
select HOTPLUG_CPU if SMP select HOTPLUG_CPU if SMP
select ARCH_RANDOM select ARCH_RANDOM
select PPC_DOORBELL
default y default y
config PPC_SPLPAR config PPC_SPLPAR
......
config PPC_WSP
bool
select PPC_A2
select GENERIC_TBSYNC
select PPC_ICSWX
select PPC_SCOM
select PPC_XICS
select PPC_ICP_NATIVE
select PCI
select PPC_IO_WORKAROUNDS if PCI
select PPC_INDIRECT_PIO if PCI
default n
menu "WSP platform selection"
depends on PPC_BOOK3E_64
config PPC_PSR2
bool "PowerEN System Reference Platform 2"
select EPAPR_BOOT
select PPC_WSP
default y
config PPC_CHROMA
bool "PowerEN PCIe Chroma Card"
select EPAPR_BOOT
select PPC_WSP
select OF_DYNAMIC
default y
endmenu
ccflags-y += $(NO_MINIMAL_TOC)
obj-y += setup.o ics.o wsp.o
obj-$(CONFIG_PPC_PSR2) += psr2.o
obj-$(CONFIG_PPC_CHROMA) += chroma.o h8.o
obj-$(CONFIG_PPC_WSP) += opb_pic.o
obj-$(CONFIG_PPC_WSP) += scom_wsp.o
obj-$(CONFIG_SMP) += smp.o scom_smp.o
obj-$(CONFIG_PCI) += wsp_pci.o
obj-$(CONFIG_PCI_MSI) += msi.o
/*
* Copyright 2008-2011, IBM Corporation
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/of.h>
#include <linux/smp.h>
#include <linux/time.h>
#include <linux/of_fdt.h>
#include <asm/machdep.h>
#include <asm/udbg.h>
#include "ics.h"
#include "wsp.h"
void __init chroma_setup_arch(void)
{
wsp_setup_arch();
wsp_setup_h8();
}
static int __init chroma_probe(void)
{
unsigned long root = of_get_flat_dt_root();
if (!of_flat_dt_is_compatible(root, "ibm,wsp-chroma"))
return 0;
return 1;
}
define_machine(chroma_md) {
.name = "Chroma PCIe",
.probe = chroma_probe,
.setup_arch = chroma_setup_arch,
.restart = wsp_h8_restart,
.power_off = wsp_h8_power_off,
.halt = wsp_halt,
.calibrate_decr = generic_calibrate_decr,
.init_IRQ = wsp_setup_irq,
.progress = udbg_progress,
.power_save = book3e_idle,
};
machine_arch_initcall(chroma_md, wsp_probe_devices);
/*
* Copyright 2008-2011, IBM Corporation
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/io.h>
#include <linux/of_address.h>
#include "wsp.h"
/*
* The UART connection to the H8 is over ttyS1 which is just a 16550.
* We assume that FW has set it up correctly and no one messes with it.
*/
static u8 __iomem *h8;
#define RBR 0 /* Receiver Buffer Register */
#define THR 0 /* Transmitter Holding Register */
#define LSR 5 /* Line Status Register */
#define LSR_DR 0x01 /* LSR value for Data-Ready */
#define LSR_THRE 0x20 /* LSR value for Transmitter-Holding-Register-Empty */
static void wsp_h8_putc(int c)
{
u8 lsr;
do {
lsr = readb(h8 + LSR);
} while ((lsr & LSR_THRE) != LSR_THRE);
writeb(c, h8 + THR);
}
static int wsp_h8_getc(void)
{
u8 lsr;
do {
lsr = readb(h8 + LSR);
} while ((lsr & LSR_DR) != LSR_DR);
return readb(h8 + RBR);
}
static void wsp_h8_puts(const char *s, int sz)
{
int i;
for (i = 0; i < sz; i++) {
wsp_h8_putc(s[i]);
/* no flow control so wait for echo */
wsp_h8_getc();
}
wsp_h8_putc('\r');
wsp_h8_putc('\n');
}
static void wsp_h8_terminal_cmd(const char *cmd, int sz)
{
hard_irq_disable();
wsp_h8_puts(cmd, sz);
/* should never return, but just in case */
for (;;)
continue;
}
void wsp_h8_restart(char *cmd)
{
static const char restart[] = "warm-reset";
(void)cmd;
wsp_h8_terminal_cmd(restart, sizeof(restart) - 1);
}
void wsp_h8_power_off(void)
{
static const char off[] = "power-off";
wsp_h8_terminal_cmd(off, sizeof(off) - 1);
}
static void __iomem *wsp_h8_getaddr(void)
{
struct device_node *aliases;
struct device_node *uart;
struct property *path;
void __iomem *va = NULL;
/*
* there is nothing in the devtree to tell us which is mapped
* to the H8, but we know it is the second serial port.
*/
aliases = of_find_node_by_path("/aliases");
if (aliases == NULL)
return NULL;
path = of_find_property(aliases, "serial1", NULL);
if (path == NULL)
goto out;
uart = of_find_node_by_path(path->value);
if (uart == NULL)
goto out;
va = of_iomap(uart, 0);
/* remove it so no one messes with it */
of_detach_node(uart);
of_node_put(uart);
out:
of_node_put(aliases);
return va;
}
void __init wsp_setup_h8(void)
{
h8 = wsp_h8_getaddr();
/* Devtree change? Let's hard-map it anyway */
if (h8 == NULL) {
pr_warn("UART to H8 could not be found");
h8 = ioremap(0xffc0008000ULL, 0x100);
}
}
/*
* Copyright 2009 IBM Corporation.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#ifndef __ICS_H
#define __ICS_H
#define XIVE_ADDR_MASK 0x7FFULL
extern void wsp_init_irq(void);
extern int wsp_ics_alloc_irq(struct device_node *dn, int num);
extern void wsp_ics_free_irq(struct device_node *dn, unsigned int irq);
#ifdef CONFIG_PCI_MSI
extern void wsp_ics_set_msi_chip(unsigned int irq);
extern void wsp_ics_set_std_chip(unsigned int irq);
#endif /* CONFIG_PCI_MSI */
#endif /* __ICS_H */
/*
* Copyright 2011 Michael Ellerman, IBM Corp.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/msi.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include "msi.h"
#include "ics.h"
#include "wsp_pci.h"
/* Magic addresses for 32 & 64-bit MSIs with hardcoded MVE 0 */
#define MSI_ADDR_32 0xFFFF0000ul
#define MSI_ADDR_64 0x1000000000000000ul
int wsp_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
struct pci_controller *phb;
struct msi_desc *entry;
struct msi_msg msg;
unsigned int virq;
int hwirq;
phb = pci_bus_to_host(dev->bus);
if (!phb)
return -ENOENT;
entry = list_first_entry(&dev->msi_list, struct msi_desc, list);
if (entry->msi_attrib.is_64) {
msg.address_lo = 0;
msg.address_hi = MSI_ADDR_64 >> 32;
} else {
msg.address_lo = MSI_ADDR_32;
msg.address_hi = 0;
}
list_for_each_entry(entry, &dev->msi_list, list) {
hwirq = wsp_ics_alloc_irq(phb->dn, 1);
if (hwirq < 0) {
dev_warn(&dev->dev, "wsp_msi: hwirq alloc failed!\n");
return hwirq;
}
virq = irq_create_mapping(NULL, hwirq);
if (virq == NO_IRQ) {
dev_warn(&dev->dev, "wsp_msi: virq alloc failed!\n");
return -1;
}
dev_dbg(&dev->dev, "wsp_msi: allocated irq %#x/%#x\n",
hwirq, virq);
wsp_ics_set_msi_chip(virq);
irq_set_msi_desc(virq, entry);
msg.data = hwirq & XIVE_ADDR_MASK;
write_msi_msg(virq, &msg);
}
return 0;
}
void wsp_teardown_msi_irqs(struct pci_dev *dev)
{
struct pci_controller *phb;
struct msi_desc *entry;
int hwirq;
phb = pci_bus_to_host(dev->bus);
dev_dbg(&dev->dev, "wsp_msi: tearing down msi irqs\n");
list_for_each_entry(entry, &dev->msi_list, list) {
if (entry->irq == NO_IRQ)
continue;
irq_set_msi_desc(entry->irq, NULL);
wsp_ics_set_std_chip(entry->irq);
hwirq = virq_to_hw(entry->irq);
/* In this order to avoid racing with irq_create_mapping() */
irq_dispose_mapping(entry->irq);
wsp_ics_free_irq(phb->dn, hwirq);
}
}
void wsp_setup_phb_msi(struct pci_controller *phb)
{
/* Create a single MVE at offset 0 that matches everything */
out_be64(phb->cfg_data + PCIE_REG_IODA_ADDR, PCIE_REG_IODA_AD_TBL_MVT);
out_be64(phb->cfg_data + PCIE_REG_IODA_DATA0, 1ull << 63);
ppc_md.setup_msi_irqs = wsp_setup_msi_irqs;
ppc_md.teardown_msi_irqs = wsp_teardown_msi_irqs;
}
/*
* Copyright 2011 Michael Ellerman, IBM Corp.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#ifndef __WSP_MSI_H
#define __WSP_MSI_H
#ifdef CONFIG_PCI_MSI
extern void wsp_setup_phb_msi(struct pci_controller *phb);
#else
static inline void wsp_setup_phb_msi(struct pci_controller *phb) { }
#endif
#endif /* __WSP_MSI_H */
/*
* IBM Onboard Peripheral Bus Interrupt Controller
*
* Copyright 2010 Jack Miller, IBM Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <asm/reg_a2.h>
#include <asm/irq.h>
#define OPB_NR_IRQS 32
#define OPB_MLSASIER 0x04 /* MLS Accumulated Status IER */
#define OPB_MLSIR 0x50 /* MLS Interrupt Register */
#define OPB_MLSIER 0x54 /* MLS Interrupt Enable Register */
#define OPB_MLSIPR 0x58 /* MLS Interrupt Polarity Register */
#define OPB_MLSIIR 0x5c /* MLS Interrupt Inputs Register */
static int opb_index = 0;
struct opb_pic {
struct irq_domain *host;
void *regs;
int index;
spinlock_t lock;
};
static u32 opb_in(struct opb_pic *opb, int offset)
{
return in_be32(opb->regs + offset);
}
static void opb_out(struct opb_pic *opb, int offset, u32 val)
{
out_be32(opb->regs + offset, val);
}
static void opb_unmask_irq(struct irq_data *d)
{
struct opb_pic *opb;
unsigned long flags;
u32 ier, bitset;
opb = d->chip_data;
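	/* MLS registers use IBM (MSB-0) bit numbering: hwirq n is mask bit 31 - n */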
bitset = (1 << (31 - irqd_to_hwirq(d)));
spin_lock_irqsave(&opb->lock, flags);
ier = opb_in(opb, OPB_MLSIER);
opb_out(opb, OPB_MLSIER, ier | bitset);
ier = opb_in(opb, OPB_MLSIER);
spin_unlock_irqrestore(&opb->lock, flags);
}
static void opb_mask_irq(struct irq_data *d)
{
struct opb_pic *opb;
unsigned long flags;
u32 ier, mask;
opb = d->chip_data;
mask = ~(1 << (31 - irqd_to_hwirq(d)));
spin_lock_irqsave(&opb->lock, flags);
ier = opb_in(opb, OPB_MLSIER);
opb_out(opb, OPB_MLSIER, ier & mask);
ier = opb_in(opb, OPB_MLSIER); // Flush posted writes
spin_unlock_irqrestore(&opb->lock, flags);
}
static void opb_ack_irq(struct irq_data *d)
{
struct opb_pic *opb;
unsigned long flags;
u32 bitset;
opb = d->chip_data;
bitset = (1 << (31 - irqd_to_hwirq(d)));
spin_lock_irqsave(&opb->lock, flags);
opb_out(opb, OPB_MLSIR, bitset);
opb_in(opb, OPB_MLSIR); // Flush posted writes
spin_unlock_irqrestore(&opb->lock, flags);
}
static void opb_mask_ack_irq(struct irq_data *d)
{
struct opb_pic *opb;
unsigned long flags;
u32 bitset;
u32 ier, ir;
opb = d->chip_data;
bitset = (1 << (31 - irqd_to_hwirq(d)));
spin_lock_irqsave(&opb->lock, flags);
ier = opb_in(opb, OPB_MLSIER);
opb_out(opb, OPB_MLSIER, ier & ~bitset);
ier = opb_in(opb, OPB_MLSIER); // Flush posted writes
opb_out(opb, OPB_MLSIR, bitset);
ir = opb_in(opb, OPB_MLSIR); // Flush posted writes
spin_unlock_irqrestore(&opb->lock, flags);
}
static int opb_set_irq_type(struct irq_data *d, unsigned int flow)
{
struct opb_pic *opb;
unsigned long flags;
int invert, ipr, mask, bit;
opb = d->chip_data;
/* The only information we're interested in from the type is whether it's
* a high or low trigger. For high-triggered interrupts, the polarity
* set for it in the MLS Interrupt Polarity Register is 0; for low
* interrupts it's 1, so that the proper input in the MLS Interrupt Inputs
* Register is interpreted as asserting the interrupt. */
switch (flow) {
case IRQ_TYPE_NONE:
opb_mask_irq(d);
return 0;
case IRQ_TYPE_LEVEL_HIGH:
invert = 0;
break;
case IRQ_TYPE_LEVEL_LOW:
invert = 1;
break;
default:
return -EINVAL;
}
bit = (1 << (31 - irqd_to_hwirq(d)));
mask = ~bit;
spin_lock_irqsave(&opb->lock, flags);
ipr = opb_in(opb, OPB_MLSIPR);
ipr = (ipr & mask) | (invert ? bit : 0);
opb_out(opb, OPB_MLSIPR, ipr);
ipr = opb_in(opb, OPB_MLSIPR); // Flush posted writes
spin_unlock_irqrestore(&opb->lock, flags);
/* Record the type in the interrupt descriptor */
irqd_set_trigger_type(d, flow);
return 0;
}
static struct irq_chip opb_irq_chip = {
.name = "OPB",
.irq_mask = opb_mask_irq,
.irq_unmask = opb_unmask_irq,
.irq_mask_ack = opb_mask_ack_irq,
.irq_ack = opb_ack_irq,
.irq_set_type = opb_set_irq_type
};
static int opb_host_map(struct irq_domain *host, unsigned int virq,
irq_hw_number_t hwirq)
{
struct opb_pic *opb;
opb = host->host_data;
/* Most of the important stuff is handled by the generic host code, like
* the lookup, so just attach some info to the virtual irq */
irq_set_chip_data(virq, opb);
irq_set_chip_and_handler(virq, &opb_irq_chip, handle_level_irq);
irq_set_irq_type(virq, IRQ_TYPE_NONE);
return 0;
}
static const struct irq_domain_ops opb_host_ops = {
.map = opb_host_map,
.xlate = irq_domain_xlate_twocell,
};
irqreturn_t opb_irq_handler(int irq, void *private)
{
struct opb_pic *opb;
u32 ir, src, subvirq;
opb = (struct opb_pic *) private;
/* Read the OPB MLS Interrupt Register for
* asserted interrupts */
ir = opb_in(opb, OPB_MLSIR);
if (!ir)
return IRQ_NONE;
do {
/* Get 1 - 32 source, *NOT* bit */
src = 32 - ffs(ir);
/* Translate from the OPB's conception of interrupt number to
* Linux's virtual IRQ */
subvirq = irq_linear_revmap(opb->host, src);
generic_handle_irq(subvirq);
} while ((ir = opb_in(opb, OPB_MLSIR)));
return IRQ_HANDLED;
}
struct opb_pic *opb_pic_init_one(struct device_node *dn)
{
struct opb_pic *opb;
struct resource res;
if (of_address_to_resource(dn, 0, &res)) {
printk(KERN_ERR "opb: Couldn't translate resource\n");
return NULL;
}
opb = kzalloc(sizeof(struct opb_pic), GFP_KERNEL);
if (!opb) {
printk(KERN_ERR "opb: Failed to allocate opb struct!\n");
return NULL;
}
/* Get access to the OPB MMIO registers */
opb->regs = ioremap(res.start + 0x10000, 0x1000);
if (!opb->regs) {
printk(KERN_ERR "opb: Failed to allocate register space!\n");
goto free_opb;
}
/* Allocate an irq domain so that Linux knows that despite only
* having one interrupt to issue, we're the controller for multiple
* hardware IRQs, so later we can look up their virtual IRQs. */
opb->host = irq_domain_add_linear(dn, OPB_NR_IRQS, &opb_host_ops, opb);
if (!opb->host) {
printk(KERN_ERR "opb: Failed to allocate IRQ host!\n");
goto free_regs;
}
opb->index = opb_index++;
spin_lock_init(&opb->lock);
/* Disable all interrupts by default */
opb_out(opb, OPB_MLSASIER, 0);
opb_out(opb, OPB_MLSIER, 0);
/* ACK any interrupts left by FW */
opb_out(opb, OPB_MLSIR, 0xFFFFFFFF);
return opb;
free_regs:
iounmap(opb->regs);
free_opb:
kfree(opb);
return NULL;
}
void __init opb_pic_init(void)
{
struct device_node *dn;
struct opb_pic *opb;
int virq;
int rc;
/* Call init_one for each OPB device */
for_each_compatible_node(dn, NULL, "ibm,opb") {
/* Fill in an OPB struct */
opb = opb_pic_init_one(dn);
if (!opb) {
printk(KERN_WARNING "opb: Failed to init node, skipped!\n");
continue;
}
/* Map / get opb's hardware virtual irq */
virq = irq_of_parse_and_map(dn, 0);
if (virq <= 0) {
printk("opb: irq_op_parse_and_map failed!\n");
continue;
}
/* Attach opb interrupt handler to new virtual IRQ */
rc = request_irq(virq, opb_irq_handler, IRQF_NO_THREAD,
"OPB LS Cascade", opb);
if (rc) {
printk("opb: request_irq failed: %d\n", rc);
continue;
}
printk("OPB%d init with %d IRQs at %p\n", opb->index,
OPB_NR_IRQS, opb->regs);
}
}
/*
* Copyright 2008-2011, IBM Corporation
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/of.h>
#include <linux/smp.h>
#include <linux/time.h>
#include <linux/of_fdt.h>
#include <asm/machdep.h>
#include <asm/udbg.h>
#include "ics.h"
#include "wsp.h"
static void psr2_spin(void)
{
hard_irq_disable();
for (;;)
continue;
}
static void psr2_restart(char *cmd)
{
psr2_spin();
}
static int __init psr2_probe(void)
{
unsigned long root = of_get_flat_dt_root();
if (of_flat_dt_is_compatible(root, "ibm,wsp-chroma")) {
/* chroma systems also claim they are psr2s */
return 0;
}
if (!of_flat_dt_is_compatible(root, "ibm,psr2"))
return 0;
return 1;
}
define_machine(psr2_md) {
.name = "PSR2 A2",
.probe = psr2_probe,
.setup_arch = wsp_setup_arch,
.restart = psr2_restart,
.power_off = psr2_spin,
.halt = psr2_spin,
.calibrate_decr = generic_calibrate_decr,
.init_IRQ = wsp_setup_irq,
.progress = udbg_progress,
.power_save = book3e_idle,
};
machine_arch_initcall(psr2_md, wsp_probe_devices);
/*
* SCOM backend for WSP
*
* Copyright 2010 Benjamin Herrenschmidt, IBM Corp.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/cpumask.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/of_address.h>
#include <asm/cputhreads.h>
#include <asm/reg_a2.h>
#include <asm/scom.h>
#include <asm/udbg.h>
#include "wsp.h"
static scom_map_t wsp_scom_map(struct device_node *dev, u64 reg, u64 count)
{
struct resource r;
u64 xscom_addr;
if (!of_get_property(dev, "scom-controller", NULL)) {
pr_err("%s: device %s is not a SCOM controller\n",
__func__, dev->full_name);
return SCOM_MAP_INVALID;
}
if (of_address_to_resource(dev, 0, &r)) {
pr_debug("Failed to find SCOM controller address\n");
return 0;
}
/* Transform the SCOM address into an XSCOM offset; SCOM registers are 8 bytes wide, hence the shifts by 3 */
xscom_addr = ((reg & 0x7f000000) >> 1) | ((reg & 0xfffff) << 3);
return (scom_map_t)ioremap(r.start + xscom_addr, count << 3);
}
static void wsp_scom_unmap(scom_map_t map)
{
iounmap((void *)map);
}
static int wsp_scom_read(scom_map_t map, u64 reg, u64 *value)
{
u64 __iomem *addr = (u64 __iomem *)map;
*value = in_be64(addr + reg);
return 0;
}
static int wsp_scom_write(scom_map_t map, u64 reg, u64 value)
{
u64 __iomem *addr = (u64 __iomem *)map;
out_be64(addr + reg, value);
return 0;
}
static const struct scom_controller wsp_scom_controller = {
.map = wsp_scom_map,
.unmap = wsp_scom_unmap,
.read = wsp_scom_read,
.write = wsp_scom_write
};
void scom_init_wsp(void)
{
scom_init(&wsp_scom_controller);
}
/*
* Copyright 2010 Michael Ellerman, IBM Corporation
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/of_platform.h>
#include "wsp.h"
/*
* Find chip-id by walking up device tree looking for ibm,wsp-chip-id property.
* Won't work for nodes that are not a descendant of a wsp node.
*/
int wsp_get_chip_id(struct device_node *dn)
{
const u32 *p;
int rc;
/* Start looking at the specified node, not its parent */
dn = of_node_get(dn);
while (dn && !(p = of_get_property(dn, "ibm,wsp-chip-id", NULL)))
dn = of_get_next_parent(dn);
if (!dn)
return -1;
rc = *p;
of_node_put(dn);
return rc;
}
/*
* SMP Support for A2 platforms
*
* Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
*/
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/smp.h>
#include <asm/dbell.h>
#include <asm/machdep.h>
#include <asm/xics.h>
#include "ics.h"
#include "wsp.h"
static void smp_a2_setup_cpu(int cpu)
{
doorbell_setup_this_cpu();
if (cpu != boot_cpuid)
xics_setup_cpu();
}
int smp_a2_kick_cpu(int nr)
{
const char *enable_method;
struct device_node *np;
int thr_idx;
if (nr < 0 || nr >= NR_CPUS)
return -ENOENT;
np = of_get_cpu_node(nr, &thr_idx);
if (!np)
return -ENODEV;
enable_method = of_get_property(np, "enable-method", NULL);
pr_devel("CPU%d has enable-method: \"%s\"\n", nr, enable_method);
if (!enable_method) {
printk(KERN_ERR "CPU%d has no enable-method\n", nr);
return -ENOENT;
} else if (strcmp(enable_method, "ibm,a2-scom") == 0) {
if (a2_scom_startup_cpu(nr, thr_idx, np))
return -1;
} else {
printk(KERN_ERR "CPU%d: Don't understand enable-method \"%s\"\n",
nr, enable_method);
return -EINVAL;
}
/*
* The processor is currently spinning, waiting for the
* cpu_start field to become non-zero. After we set cpu_start,
* the processor will continue on to secondary_start
*/
paca[nr].cpu_start = 1;
return 0;
}
static int __init smp_a2_probe(void)
{
return num_possible_cpus();
}
static struct smp_ops_t a2_smp_ops = {
.message_pass = NULL, /* Use smp_muxed_ipi_message_pass */
.cause_ipi = doorbell_cause_ipi,
.probe = smp_a2_probe,
.kick_cpu = smp_a2_kick_cpu,
.setup_cpu = smp_a2_setup_cpu,
};
void __init a2_setup_smp(void)
{
smp_ops = &a2_smp_ops;
}
/*
* Copyright 2008-2011, IBM Corporation
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/smp.h>
#include <linux/delay.h>
#include <linux/time.h>
#include <linux/of_address.h>
#include <asm/scom.h>
#include "wsp.h"
#include "ics.h"
#define WSP_SOC_COMPATIBLE "ibm,wsp-soc"
#define PBIC_COMPATIBLE "ibm,wsp-pbic"
#define COPRO_COMPATIBLE "ibm,wsp-coprocessor"
static int __init wsp_probe_buses(void)
{
static __initdata struct of_device_id bus_ids[] = {
/*
* every node in between needs to be here or you won't
* find it
*/
{ .compatible = WSP_SOC_COMPATIBLE, },
{ .compatible = PBIC_COMPATIBLE, },
{ .compatible = COPRO_COMPATIBLE, },
{},
};
of_platform_bus_probe(NULL, bus_ids, NULL);
return 0;
}
void __init wsp_setup_arch(void)
{
/* init to some ~sane value until calibrate_delay() runs */
loops_per_jiffy = 50000000;
scom_init_wsp();
/* Setup SMP callback */
#ifdef CONFIG_SMP
a2_setup_smp();
#endif
#ifdef CONFIG_PCI
wsp_setup_pci();
#endif
}
void __init wsp_setup_irq(void)
{
wsp_init_irq();
opb_pic_init();
}
int __init wsp_probe_devices(void)
{
struct device_node *np;
/* Our RTC is a ds1500. It seems to be programmatically compatible
* with the ds1511 for which we have a driver so let's use that
*/
np = of_find_compatible_node(NULL, NULL, "dallas,ds1500");
if (np != NULL) {
struct resource res;
if (of_address_to_resource(np, 0, &res) == 0)
platform_device_register_simple("ds1511", 0, &res, 1);
}
wsp_probe_buses();
return 0;
}
void wsp_halt(void)
{
u64 val;
scom_map_t m;
struct device_node *dn;
struct device_node *mine;
struct device_node *me;
int rc;
me = of_get_cpu_node(smp_processor_id(), NULL);
mine = scom_find_parent(me);
/* This will halt all the A2s but not power off the chip */
for_each_node_with_property(dn, "scom-controller") {
if (dn == mine)
continue;
m = scom_map(dn, 0, 1);
/* read-modify-write it so the HW probe does not get
* confused */
rc = scom_read(m, 0, &val);
if (rc == 0)
scom_write(m, 0, val | 1);
scom_unmap(m);
}
m = scom_map(mine, 0, 1);
rc = scom_read(m, 0, &val);
if (rc == 0)
scom_write(m, 0, val | 1);
/* should never return */
scom_unmap(m);
}
#ifndef __WSP_H
#define __WSP_H
#include <asm/wsp.h>
/* Devtree compatible strings for major devices */
#define PCIE_COMPATIBLE "ibm,wsp-pciex"
extern void wsp_setup_arch(void);
extern void wsp_setup_irq(void);
extern int wsp_probe_devices(void);
extern void wsp_halt(void);
extern void wsp_setup_pci(void);
extern void scom_init_wsp(void);
extern void a2_setup_smp(void);
extern int a2_scom_startup_cpu(unsigned int lcpu, int thr_idx,
struct device_node *np);
extern int smp_a2_kick_cpu(int nr);
extern void opb_pic_init(void);
/* chroma-specific management */
extern void wsp_h8_restart(char *cmd);
extern void wsp_h8_power_off(void);
extern void __init wsp_setup_h8(void);
#endif /* __WSP_H */
/*
* Copyright 2010 Ben Herrenschmidt, IBM Corporation
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#ifndef __WSP_PCI_H
#define __WSP_PCI_H
/* Architected registers */
#define PCIE_REG_DMA_CHAN_STATUS 0x110
#define PCIE_REG_CPU_LOADSTORE_STATUS 0x120
#define PCIE_REG_CONFIG_DATA 0x130
#define PCIE_REG_LOCK0 0x138
#define PCIE_REG_CONFIG_ADDRESS 0x140
#define PCIE_REG_CA_ENABLE 0x8000000000000000ull
#define PCIE_REG_CA_BUS_MASK 0x0ff0000000000000ull
#define PCIE_REG_CA_BUS_SHIFT (20+32)
#define PCIE_REG_CA_DEV_MASK 0x000f800000000000ull
#define PCIE_REG_CA_DEV_SHIFT (15+32)
#define PCIE_REG_CA_FUNC_MASK 0x0000700000000000ull
#define PCIE_REG_CA_FUNC_SHIFT (12+32)
#define PCIE_REG_CA_REG_MASK 0x00000fff00000000ull
#define PCIE_REG_CA_REG_SHIFT ( 0+32)
#define PCIE_REG_CA_BE_MASK 0x00000000f0000000ull
#define PCIE_REG_CA_BE_SHIFT ( 28)
#define PCIE_REG_LOCK1 0x148
#define PCIE_REG_PHB_CONFIG 0x160
#define PCIE_REG_PHBC_64B_TCE_EN 0x2000000000000000ull
#define PCIE_REG_PHBC_MMIO_DMA_FREEZE_EN 0x1000000000000000ull
#define PCIE_REG_PHBC_32BIT_MSI_EN 0x0080000000000000ull
#define PCIE_REG_PHBC_M64_EN 0x0040000000000000ull
#define PCIE_REG_PHBC_IO_EN 0x0008000000000000ull
#define PCIE_REG_PHBC_64BIT_MSI_EN 0x0002000000000000ull
#define PCIE_REG_PHBC_M32A_EN 0x0000800000000000ull
#define PCIE_REG_PHBC_M32B_EN 0x0000400000000000ull
#define PCIE_REG_PHBC_MSI_PE_VALIDATE 0x0000200000000000ull
#define PCIE_REG_PHBC_DMA_XLATE_BYPASS 0x0000100000000000ull
#define PCIE_REG_IO_BASE_ADDR 0x170
#define PCIE_REG_IO_BASE_MASK 0x178
#define PCIE_REG_IO_START_ADDR 0x180
#define PCIE_REG_M32A_BASE_ADDR 0x190
#define PCIE_REG_M32A_BASE_MASK 0x198
#define PCIE_REG_M32A_START_ADDR 0x1a0
#define PCIE_REG_M32B_BASE_ADDR 0x1b0
#define PCIE_REG_M32B_BASE_MASK 0x1b8
#define PCIE_REG_M32B_START_ADDR 0x1c0
#define PCIE_REG_M64_BASE_ADDR 0x1e0
#define PCIE_REG_M64_BASE_MASK 0x1e8
#define PCIE_REG_M64_START_ADDR 0x1f0
#define PCIE_REG_TCE_KILL 0x210
#define PCIE_REG_TCEKILL_SINGLE 0x8000000000000000ull
#define PCIE_REG_TCEKILL_ADDR_MASK 0x000003fffffffff8ull
#define PCIE_REG_TCEKILL_PS_4K 0
#define PCIE_REG_TCEKILL_PS_64K 1
#define PCIE_REG_TCEKILL_PS_16M 2
#define PCIE_REG_TCEKILL_PS_16G 3
#define PCIE_REG_IODA_ADDR 0x220
#define PCIE_REG_IODA_AD_AUTOINC 0x8000000000000000ull
#define PCIE_REG_IODA_AD_TBL_MVT 0x0005000000000000ull
#define PCIE_REG_IODA_AD_TBL_PELT 0x0006000000000000ull
#define PCIE_REG_IODA_AD_TBL_PESTA 0x0007000000000000ull
#define PCIE_REG_IODA_AD_TBL_PESTB 0x0008000000000000ull
#define PCIE_REG_IODA_AD_TBL_TVT 0x0009000000000000ull
#define PCIE_REG_IODA_AD_TBL_TCE 0x000a000000000000ull
#define PCIE_REG_IODA_DATA0 0x228
#define PCIE_REG_IODA_DATA1 0x230
#define PCIE_REG_LOCK2 0x240
#define PCIE_REG_PHB_GEN_CAP 0x250
#define PCIE_REG_PHB_TCE_CAP 0x258
#define PCIE_REG_PHB_IRQ_CAP 0x260
#define PCIE_REG_PHB_EEH_CAP 0x268
#define PCIE_REG_PAPR_ERR_INJ_CONTROL 0x2b0
#define PCIE_REG_PAPR_ERR_INJ_ADDR 0x2b8
#define PCIE_REG_PAPR_ERR_INJ_MASK 0x2c0
#define PCIE_REG_SYS_CFG1 0x600
#define PCIE_REG_SYS_CFG1_CLASS_CODE 0x0000000000ffffffull
#define IODA_TVT0_TTA_MASK 0x000fffffffff0000ull
#define IODA_TVT0_TTA_SHIFT 4
#define IODA_TVT0_BUSNUM_VALID_MASK 0x000000000000e000ull
#define IODA_TVT0_TCE_TABLE_SIZE_MASK 0x0000000000001f00ull
#define IODA_TVT0_TCE_TABLE_SIZE_SHIFT 8
#define IODA_TVT0_BUSNUM_VALUE_MASK 0x00000000000000ffull
#define IODA_TVT0_BUSNUM_VALID_SHIFT 0
#define IODA_TVT1_DEVNUM_VALID 0x2000000000000000ull
#define IODA_TVT1_DEVNUM_VALUE_MASK 0x1f00000000000000ull
#define IODA_TVT1_DEVNUM_VALUE_SHIFT 56
#define IODA_TVT1_FUNCNUM_VALID 0x0008000000000000ull
#define IODA_TVT1_FUNCNUM_VALUE_MASK 0x0007000000000000ull
#define IODA_TVT1_FUNCNUM_VALUE_SHIFT 48
#define IODA_TVT1_IO_PAGE_SIZE_MASK 0x00001f0000000000ull
#define IODA_TVT1_IO_PAGE_SIZE_SHIFT 40
#define IODA_TVT1_PE_NUMBER_MASK 0x000000000000003full
#define IODA_TVT1_PE_NUMBER_SHIFT 0
#define IODA_TVT_COUNT 64
/* UTL Core registers */
#define PCIE_UTL_SYS_BUS_CONTROL 0x400
#define PCIE_UTL_STATUS 0x408
#define PCIE_UTL_SYS_BUS_AGENT_STATUS 0x410
#define PCIE_UTL_SYS_BUS_AGENT_ERR_SEV 0x418
#define PCIE_UTL_SYS_BUS_AGENT_IRQ_EN 0x420
#define PCIE_UTL_SYS_BUS_BURST_SZ_CONF 0x440
#define PCIE_UTL_REVISION_ID 0x448
#define PCIE_UTL_OUT_POST_HDR_BUF_ALLOC 0x4c0
#define PCIE_UTL_OUT_POST_DAT_BUF_ALLOC 0x4d0
#define PCIE_UTL_IN_POST_HDR_BUF_ALLOC 0x4e0
#define PCIE_UTL_IN_POST_DAT_BUF_ALLOC 0x4f0
#define PCIE_UTL_OUT_NP_BUF_ALLOC 0x500
#define PCIE_UTL_IN_NP_BUF_ALLOC 0x510
#define PCIE_UTL_PCIE_TAGS_ALLOC 0x520
#define PCIE_UTL_GBIF_READ_TAGS_ALLOC 0x530
#define PCIE_UTL_PCIE_PORT_CONTROL 0x540
#define PCIE_UTL_PCIE_PORT_STATUS 0x548
#define PCIE_UTL_PCIE_PORT_ERROR_SEV 0x550
#define PCIE_UTL_PCIE_PORT_IRQ_EN 0x558
#define PCIE_UTL_RC_STATUS 0x560
#define PCIE_UTL_RC_ERR_SEVERITY 0x568
#define PCIE_UTL_RC_IRQ_EN 0x570
#define PCIE_UTL_EP_STATUS 0x578
#define PCIE_UTL_EP_ERR_SEVERITY 0x580
#define PCIE_UTL_EP_ERR_IRQ_EN 0x588
#define PCIE_UTL_PCI_PM_CTRL1 0x590
#define PCIE_UTL_PCI_PM_CTRL2 0x598
/* PCIe stack registers */
#define PCIE_REG_SYSTEM_CONFIG1 0x600
#define PCIE_REG_SYSTEM_CONFIG2 0x608
#define PCIE_REG_EP_SYSTEM_CONFIG 0x618
#define PCIE_REG_EP_FLR 0x620
#define PCIE_REG_EP_BAR_CONFIG 0x628
#define PCIE_REG_LINK_CONFIG 0x630
#define PCIE_REG_PM_CONFIG 0x640
#define PCIE_REG_DLP_CONTROL 0x650
#define PCIE_REG_DLP_STATUS 0x658
#define PCIE_REG_ERR_REPORT_CONTROL 0x660
#define PCIE_REG_SLOT_CONTROL1 0x670
#define PCIE_REG_SLOT_CONTROL2 0x678
#define PCIE_REG_UTL_CONFIG 0x680
#define PCIE_REG_BUFFERS_CONFIG 0x690
#define PCIE_REG_ERROR_INJECT 0x698
#define PCIE_REG_SRIOV_CONFIG 0x6a0
#define PCIE_REG_PF0_SRIOV_STATUS 0x6a8
#define PCIE_REG_PF1_SRIOV_STATUS 0x6b0
#define PCIE_REG_PORT_NUMBER 0x700
#define PCIE_REG_POR_SYSTEM_CONFIG 0x708
/* PHB internal logic registers */
#define PCIE_REG_PHB_VERSION 0x800
#define PCIE_REG_RESET 0x808
#define PCIE_REG_PHB_CONTROL 0x810
#define PCIE_REG_PHB_TIMEOUT_CONTROL1 0x878
#define PCIE_REG_PHB_QUIESCE_DMA 0x888
#define PCIE_REG_PHB_DMA_READ_TAG_ACTV 0x900
#define PCIE_REG_PHB_TCE_READ_TAG_ACTV 0x908
/* FIR registers */
#define PCIE_REG_LEM_FIR_ACCUM 0xc00
#define PCIE_REG_LEM_FIR_AND_MASK 0xc08
#define PCIE_REG_LEM_FIR_OR_MASK 0xc10
#define PCIE_REG_LEM_ACTION0 0xc18
#define PCIE_REG_LEM_ACTION1 0xc20
#define PCIE_REG_LEM_ERROR_MASK 0xc30
#define PCIE_REG_LEM_ERROR_AND_MASK 0xc38
#define PCIE_REG_LEM_ERROR_OR_MASK 0xc40
/* PHB Error registers */
#define PCIE_REG_PHB_ERR_STATUS 0xc80
#define PCIE_REG_PHB_ERR1_STATUS 0xc88
#define PCIE_REG_PHB_ERR_INJECT 0xc90
#define PCIE_REG_PHB_ERR_LEM_ENABLE 0xc98
#define PCIE_REG_PHB_ERR_IRQ_ENABLE 0xca0
#define PCIE_REG_PHB_ERR_FREEZE_ENABLE 0xca8
#define PCIE_REG_PHB_ERR_SIDE_ENABLE 0xcb8
#define PCIE_REG_PHB_ERR_LOG_0 0xcc0
#define PCIE_REG_PHB_ERR_LOG_1 0xcc8
#define PCIE_REG_PHB_ERR_STATUS_MASK 0xcd0
#define PCIE_REG_PHB_ERR1_STATUS_MASK 0xcd8
#define PCIE_REG_MMIO_ERR_STATUS 0xd00
#define PCIE_REG_MMIO_ERR1_STATUS 0xd08
#define PCIE_REG_MMIO_ERR_INJECT 0xd10
#define PCIE_REG_MMIO_ERR_LEM_ENABLE 0xd18
#define PCIE_REG_MMIO_ERR_IRQ_ENABLE 0xd20
#define PCIE_REG_MMIO_ERR_FREEZE_ENABLE 0xd28
#define PCIE_REG_MMIO_ERR_SIDE_ENABLE 0xd38
#define PCIE_REG_MMIO_ERR_LOG_0 0xd40
#define PCIE_REG_MMIO_ERR_LOG_1 0xd48
#define PCIE_REG_MMIO_ERR_STATUS_MASK 0xd50
#define PCIE_REG_MMIO_ERR1_STATUS_MASK 0xd58
#define PCIE_REG_DMA_ERR_STATUS 0xd80
#define PCIE_REG_DMA_ERR1_STATUS 0xd88
#define PCIE_REG_DMA_ERR_INJECT 0xd90
#define PCIE_REG_DMA_ERR_LEM_ENABLE 0xd98
#define PCIE_REG_DMA_ERR_IRQ_ENABLE 0xda0
#define PCIE_REG_DMA_ERR_FREEZE_ENABLE 0xda8
#define PCIE_REG_DMA_ERR_SIDE_ENABLE 0xdb8
#define PCIE_REG_DMA_ERR_LOG_0 0xdc0
#define PCIE_REG_DMA_ERR_LOG_1 0xdc8
#define PCIE_REG_DMA_ERR_STATUS_MASK 0xdd0
#define PCIE_REG_DMA_ERR1_STATUS_MASK 0xdd8
/* Shortcuts for access to the above using the PHB definitions
* with an offset
*/
#define PCIE_REG_ERR_PHB_OFFSET 0x0
#define PCIE_REG_ERR_MMIO_OFFSET 0x80
#define PCIE_REG_ERR_DMA_OFFSET 0x100
/* Debug and Trace registers */
#define PCIE_REG_PHB_DEBUG_CONTROL0 0xe00
#define PCIE_REG_PHB_DEBUG_STATUS0 0xe08
#define PCIE_REG_PHB_DEBUG_CONTROL1 0xe10
#define PCIE_REG_PHB_DEBUG_STATUS1 0xe18
#define PCIE_REG_PHB_DEBUG_CONTROL2 0xe20
#define PCIE_REG_PHB_DEBUG_STATUS2 0xe28
#define PCIE_REG_PHB_DEBUG_CONTROL3 0xe30
#define PCIE_REG_PHB_DEBUG_STATUS3 0xe38
#define PCIE_REG_PHB_DEBUG_CONTROL4 0xe40
#define PCIE_REG_PHB_DEBUG_STATUS4 0xe48
#define PCIE_REG_PHB_DEBUG_CONTROL5 0xe50
#define PCIE_REG_PHB_DEBUG_STATUS5 0xe58
#define PCIE_REG_PHB_DEBUG_CONTROL6 0xe60
#define PCIE_REG_PHB_DEBUG_STATUS6 0xe68
/* Definition for PCIe errors */
struct wsp_pcie_err_log_data {
__u64 phb_err;
__u64 phb_err1;
__u64 phb_log0;
__u64 phb_log1;
__u64 mmio_err;
__u64 mmio_err1;
__u64 mmio_log0;
__u64 mmio_log1;
__u64 dma_err;
__u64 dma_err1;
__u64 dma_log0;
__u64 dma_log1;
__u64 utl_sys_err;
__u64 utl_port_err;
__u64 utl_rc_err;
__u64 unused;
};
#endif /* __WSP_PCI_H */
...@@ -26,6 +26,7 @@ ...@@ -26,6 +26,7 @@
#include <asm/errno.h> #include <asm/errno.h>
#include <asm/xics.h> #include <asm/xics.h>
#include <asm/kvm_ppc.h> #include <asm/kvm_ppc.h>
#include <asm/dbell.h>
struct icp_ipl { struct icp_ipl {
union { union {
...@@ -145,7 +146,13 @@ static unsigned int icp_native_get_irq(void) ...@@ -145,7 +146,13 @@ static unsigned int icp_native_get_irq(void)
static void icp_native_cause_ipi(int cpu, unsigned long data) static void icp_native_cause_ipi(int cpu, unsigned long data)
{ {
kvmppc_set_host_ipi(cpu, 1); kvmppc_set_host_ipi(cpu, 1);
icp_native_set_qirr(cpu, IPI_PRIORITY); #ifdef CONFIG_PPC_DOORBELL
if (cpu_has_feature(CPU_FTR_DBELL) &&
(cpumask_test_cpu(cpu, cpu_sibling_mask(smp_processor_id()))))
doorbell_cause_ipi(cpu, data);
else
#endif
icp_native_set_qirr(cpu, IPI_PRIORITY);
} }
void xics_wake_cpu(int cpu) void xics_wake_cpu(int cpu)
......
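The IPI dispatch rule added above boils down to one predicate: prefer a doorbell only when the sender and target are threads of the same core (doorbells on these machines do not cross cores) and the CPU advertises the doorbell feature; otherwise fall back to the XICS qirr path. As a sketch (the helper name is illustrative):

static bool ipi_via_doorbell(int target_cpu)
{
	return cpu_has_feature(CPU_FTR_DBELL) &&
	       cpumask_test_cpu(target_cpu,
				cpu_sibling_mask(smp_processor_id()));
}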
...@@ -122,7 +122,7 @@ void xmon_printf(const char *format, ...) ...@@ -122,7 +122,7 @@ void xmon_printf(const char *format, ...)
if (n && rc == 0) { if (n && rc == 0) {
/* No udbg hooks, fallback to printk() - dangerous */ /* No udbg hooks, fallback to printk() - dangerous */
printk(xmon_outbuf); printk("%s", xmon_outbuf);
} }
} }
......
...@@ -73,12 +73,10 @@ static int fastsleep_loop(struct cpuidle_device *dev, ...@@ -73,12 +73,10 @@ static int fastsleep_loop(struct cpuidle_device *dev,
return index; return index;
new_lpcr = old_lpcr; new_lpcr = old_lpcr;
new_lpcr &= ~(LPCR_MER | LPCR_PECE); /* lpcr[mer] must be 0 */ /* Do not exit powersave upon decrementer as we've set up the timer
* offload.
/* exit powersave upon external interrupt, but not decrementer
* interrupt.
*/ */
new_lpcr |= LPCR_PECE0; new_lpcr &= ~LPCR_PECE1;
mtspr(SPRN_LPCR, new_lpcr); mtspr(SPRN_LPCR, new_lpcr);
power7_sleep(); power7_sleep();
......
...@@ -313,7 +313,7 @@ config CRYPTO_DEV_S5P ...@@ -313,7 +313,7 @@ config CRYPTO_DEV_S5P
config CRYPTO_DEV_NX config CRYPTO_DEV_NX
bool "Support for IBM Power7+ in-Nest cryptographic acceleration" bool "Support for IBM Power7+ in-Nest cryptographic acceleration"
depends on PPC64 && IBMVIO depends on PPC64 && IBMVIO && !CPU_LITTLE_ENDIAN
default n default n
help help
Support for Power7+ in-Nest cryptographic acceleration. Support for Power7+ in-Nest cryptographic acceleration.
......
...@@ -13,7 +13,7 @@ CFLAGS := -Wall -O2 -flto -Wall -Werror -DGIT_VERSION='"$(GIT_VERSION)"' -I$(CUR ...@@ -13,7 +13,7 @@ CFLAGS := -Wall -O2 -flto -Wall -Werror -DGIT_VERSION='"$(GIT_VERSION)"' -I$(CUR
export CC CFLAGS export CC CFLAGS
TARGETS = pmu copyloops mm TARGETS = pmu copyloops mm tm
endif endif
......
...@@ -30,12 +30,15 @@ int run_test(int (test_function)(void), char *name) ...@@ -30,12 +30,15 @@ int run_test(int (test_function)(void), char *name)
pid = fork(); pid = fork();
if (pid == 0) { if (pid == 0) {
setpgid(0, 0);
exit(test_function()); exit(test_function());
} else if (pid == -1) { } else if (pid == -1) {
perror("fork"); perror("fork");
return 1; return 1;
} }
setpgid(pid, pid);
/* Wake us up in timeout seconds */ /* Wake us up in timeout seconds */
alarm(TIMEOUT); alarm(TIMEOUT);
terminated = false; terminated = false;
...@@ -50,17 +53,20 @@ int run_test(int (test_function)(void), char *name) ...@@ -50,17 +53,20 @@ int run_test(int (test_function)(void), char *name)
if (terminated) { if (terminated) {
printf("!! force killing %s\n", name); printf("!! force killing %s\n", name);
kill(pid, SIGKILL); kill(-pid, SIGKILL);
return 1; return 1;
} else { } else {
printf("!! killing %s\n", name); printf("!! killing %s\n", name);
kill(pid, SIGTERM); kill(-pid, SIGTERM);
terminated = true; terminated = true;
alarm(KILL_TIMEOUT); alarm(KILL_TIMEOUT);
goto wait; goto wait;
} }
} }
/* Kill anything else in the process group that is still running */
kill(-pid, SIGTERM);
if (WIFEXITED(status)) if (WIFEXITED(status))
status = WEXITSTATUS(status); status = WEXITSTATUS(status);
else { else {
...@@ -99,7 +105,10 @@ int test_harness(int (test_function)(void), char *name) ...@@ -99,7 +105,10 @@ int test_harness(int (test_function)(void), char *name)
rc = run_test(test_function, name); rc = run_test(test_function, name);
test_finish(name, rc); if (rc == MAGIC_SKIP_RETURN_VALUE)
test_skip(name);
else
test_finish(name, rc);
return rc; return rc;
} }
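With the skip plumbing above, a test opts out by returning the magic value, which the SKIP_IF() macro (used by the EBB tests later in this diff) does on its behalf. A minimal sketch of a skippable test, assuming the shared harness header used by the other tests:

#include <stdlib.h>

#include "utils.h"	/* test_harness(), SKIP_IF() - assumed location */

static int maybe_skipped_test(void)
{
	/* SKIP_IF() returns MAGIC_SKIP_RETURN_VALUE when the condition
	 * holds, which test_harness() now reports as a skip */
	SKIP_IF(getenv("RUN_ANYWAY") == NULL);
	return 0;	/* pass */
}

int main(void)
{
	return test_harness(maybe_skipped_test, "maybe_skipped_test");
}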
...@@ -4,7 +4,7 @@ noarg: ...@@ -4,7 +4,7 @@ noarg:
PROGS := count_instructions PROGS := count_instructions
EXTRA_SOURCES := ../harness.c event.c EXTRA_SOURCES := ../harness.c event.c
all: $(PROGS) all: $(PROGS) sub_all
$(PROGS): $(EXTRA_SOURCES) $(PROGS): $(EXTRA_SOURCES)
...@@ -12,12 +12,30 @@ $(PROGS): $(EXTRA_SOURCES) ...@@ -12,12 +12,30 @@ $(PROGS): $(EXTRA_SOURCES)
count_instructions: loop.S count_instructions.c $(EXTRA_SOURCES) count_instructions: loop.S count_instructions.c $(EXTRA_SOURCES)
$(CC) $(CFLAGS) -m64 -o $@ $^ $(CC) $(CFLAGS) -m64 -o $@ $^
run_tests: all run_tests: all sub_run_tests
@-for PROG in $(PROGS); do \ @-for PROG in $(PROGS); do \
./$$PROG; \ ./$$PROG; \
done; done;
clean: clean: sub_clean
rm -f $(PROGS) loop.o rm -f $(PROGS) loop.o
.PHONY: all run_tests clean
SUB_TARGETS = ebb
sub_all:
@for TARGET in $(SUB_TARGETS); do \
$(MAKE) -C $$TARGET all; \
done;
sub_run_tests: all
@for TARGET in $(SUB_TARGETS); do \
$(MAKE) -C $$TARGET run_tests; \
done;
sub_clean:
@for TARGET in $(SUB_TARGETS); do \
$(MAKE) -C $$TARGET clean; \
done;
.PHONY: all run_tests clean sub_all sub_run_tests sub_clean
noarg:
$(MAKE) -C ../../
# The EBB handler is 64-bit code and everything links against it
CFLAGS += -m64
PROGS := reg_access_test event_attributes_test cycles_test \
cycles_with_freeze_test pmc56_overflow_test \
ebb_vs_cpu_event_test cpu_event_vs_ebb_test \
cpu_event_pinned_vs_ebb_test task_event_vs_ebb_test \
task_event_pinned_vs_ebb_test multi_ebb_procs_test \
multi_counter_test pmae_handling_test \
close_clears_pmcc_test instruction_count_test \
fork_cleanup_test ebb_on_child_test \
ebb_on_willing_child_test back_to_back_ebbs_test \
lost_exception_test no_handler_test
all: $(PROGS)
$(PROGS): ../../harness.c ../event.c ../lib.c ebb.c ebb_handler.S trace.c
instruction_count_test: ../loop.S
lost_exception_test: ../lib.c
run_tests: all
@-for PROG in $(PROGS); do \
./$$PROG; \
done;
clean:
rm -f $(PROGS)
/*
* Copyright 2014, Michael Ellerman, IBM Corp.
* Licensed under GPLv2.
*/
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include "ebb.h"
#define NUMBER_OF_EBBS 50
/*
* Test that if we overflow the counter while in the EBB handler, we take
* another EBB on exiting from the handler.
*
* We do this by counting with a stupidly low sample period, causing us to
* overflow the PMU while we're still in the EBB handler, leading to another
* EBB.
*
* We get out of what would otherwise be an infinite loop by leaving the
* counter frozen once we've taken enough EBBs.
*/
static void ebb_callee(void)
{
uint64_t siar, val;
val = mfspr(SPRN_BESCR);
if (!(val & BESCR_PMEO)) {
ebb_state.stats.spurious++;
goto out;
}
ebb_state.stats.ebb_count++;
trace_log_counter(ebb_state.trace, ebb_state.stats.ebb_count);
/* Resets the PMC */
count_pmc(1, sample_period);
out:
if (ebb_state.stats.ebb_count == NUMBER_OF_EBBS)
/* Reset but leave counters frozen */
reset_ebb_with_clear_mask(MMCR0_PMAO);
else
/* Unfreezes */
reset_ebb();
/* Do some stuff to chew some cycles and pop the counter */
siar = mfspr(SPRN_SIAR);
trace_log_reg(ebb_state.trace, SPRN_SIAR, siar);
val = mfspr(SPRN_PMC1);
trace_log_reg(ebb_state.trace, SPRN_PMC1, val);
val = mfspr(SPRN_MMCR0);
trace_log_reg(ebb_state.trace, SPRN_MMCR0, val);
}
int back_to_back_ebbs(void)
{
struct event event;
event_init_named(&event, 0x1001e, "cycles");
event_leader_ebb_init(&event);
event.attr.exclude_kernel = 1;
event.attr.exclude_hv = 1;
event.attr.exclude_idle = 1;
FAIL_IF(event_open(&event));
setup_ebb_handler(ebb_callee);
FAIL_IF(ebb_event_enable(&event));
sample_period = 5;
ebb_freeze_pmcs();
mtspr(SPRN_PMC1, pmc_sample_period(sample_period));
ebb_global_enable();
ebb_unfreeze_pmcs();
while (ebb_state.stats.ebb_count < NUMBER_OF_EBBS)
FAIL_IF(core_busy_loop());
ebb_global_disable();
ebb_freeze_pmcs();
count_pmc(1, sample_period);
dump_ebb_state();
event_close(&event);
FAIL_IF(ebb_state.stats.ebb_count != NUMBER_OF_EBBS);
return 0;
}
int main(void)
{
return test_harness(back_to_back_ebbs, "back_to_back_ebbs");
}
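One detail the tests above rely on: a PMC raises its exception on the transition into the most significant bit, so counting "period" events means starting the counter that far below 0x80000000. A sketch of what the pmc_sample_period() helper used above presumably computes (the helper itself lives in the shared EBB code, outside this excerpt):

static inline uint32_t pmc_sample_period_sketch(uint32_t period)
{
	/* the counter overflows (and EBBs) once it counts up to 0x80000000 */
	return 0x80000000UL - period;
}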
/*
* Copyright 2014, Michael Ellerman, IBM Corp.
* Licensed under GPLv2.
*/
#include <stdio.h>
#include <stdlib.h>
#include <setjmp.h>
#include <signal.h>
#include "ebb.h"
/*
* Test that closing the EBB event clears MMCR0_PMCC, preventing further access
* by userspace to the PMU hardware.
*/
int close_clears_pmcc(void)
{
struct event event;
event_init_named(&event, 0x1001e, "cycles");
event_leader_ebb_init(&event);
FAIL_IF(event_open(&event));
ebb_enable_pmc_counting(1);
setup_ebb_handler(standard_ebb_callee);
ebb_global_enable();
FAIL_IF(ebb_event_enable(&event));
mtspr(SPRN_PMC1, pmc_sample_period(sample_period));
while (ebb_state.stats.ebb_count < 1)
FAIL_IF(core_busy_loop());
ebb_global_disable();
event_close(&event);
FAIL_IF(ebb_state.stats.ebb_count == 0);
/* The real test is here: do we take a SIGILL when writing PMU regs now
* that we have closed the event? We expect that we will. */
FAIL_IF(catch_sigill(write_pmc1));
/* We should still be able to read EBB regs though */
mfspr(SPRN_EBBHR);
mfspr(SPRN_EBBRR);
mfspr(SPRN_BESCR);
return 0;
}
int main(void)
{
return test_harness(close_clears_pmcc, "close_clears_pmcc");
}
/*
 * Copyright 2014, Michael Ellerman, IBM Corp.
 * Licensed under GPLv2.
 */

#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

#include "ebb.h"

/*
 * Tests a pinned cpu event vs an EBB - in that order. The pinned cpu event
 * should remain and the EBB event should fail to enable.
 */
static int setup_cpu_event(struct event *event, int cpu)
{
	event_init_named(event, 0x400FA, "PM_RUN_INST_CMPL");

	event->attr.pinned = 1;

	event->attr.exclude_kernel = 1;
	event->attr.exclude_hv = 1;
	event->attr.exclude_idle = 1;

	SKIP_IF(require_paranoia_below(1));
	FAIL_IF(event_open_with_cpu(event, cpu));
	FAIL_IF(event_enable(event));

	return 0;
}

int cpu_event_pinned_vs_ebb(void)
{
	union pipe read_pipe, write_pipe;
	struct event event;
	int cpu, rc;
	pid_t pid;

	cpu = pick_online_cpu();
	FAIL_IF(cpu < 0);
	FAIL_IF(bind_to_cpu(cpu));

	FAIL_IF(pipe(read_pipe.fds) == -1);
	FAIL_IF(pipe(write_pipe.fds) == -1);

	pid = fork();
	if (pid == 0) {
		/* NB order of pipes looks reversed */
		exit(ebb_child(write_pipe, read_pipe));
	}

	/* We set up the cpu event first */
	rc = setup_cpu_event(&event, cpu);
	if (rc) {
		kill_child_and_wait(pid);
		return rc;
	}

	/* Signal the child to install its EBB event and wait */
	if (sync_with_child(read_pipe, write_pipe))
		/* If it fails, wait for it to exit */
		goto wait;

	/* Signal the child to run */
	FAIL_IF(sync_with_child(read_pipe, write_pipe));

wait:
	/* We expect it to fail to read the event */
	FAIL_IF(wait_for_child(pid) != 2);

	FAIL_IF(event_disable(&event));
	FAIL_IF(event_read(&event));

	event_report(&event);

	/* The cpu event should have run */
	FAIL_IF(event.result.value == 0);
	FAIL_IF(event.result.enabled != event.result.running);

	return 0;
}

int main(void)
{
	return test_harness(cpu_event_pinned_vs_ebb, "cpu_event_pinned_vs_ebb");
}
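/*
 * Not part of the diff above: a sketch of the handshake the
 * sync_with_child() calls suggest. Parent and child each block on a
 * one-byte read until the other side writes, giving a simple two-pipe
 * rendezvous. The signature here (raw fd pairs rather than the tests'
 * union pipe type) is a simplification and an assumption.
 */
static int sync_with_child_sketch(int read_pipe[2], int write_pipe[2])
{
	char c = 0;

	/* Wait for the child to signal it is ready ... */
	if (read(read_pipe[0], &c, 1) != 1)
		return 1;

	/* ... then write one byte back to let it proceed */
	if (write(write_pipe[1], &c, 1) != 1)
		return 1;

	return 0;
}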
/*
 * Copyright 2014, Michael Ellerman, IBM Corp.
 * Licensed under GPLv2.
 */

#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

#include "ebb.h"

/*
 * Tests a cpu event vs an EBB - in that order. The EBB should force the cpu
 * event off the PMU.
 */
static int setup_cpu_event(struct event *event, int cpu)
{
	event_init_named(event, 0x400FA, "PM_RUN_INST_CMPL");

	event->attr.exclude_kernel = 1;
	event->attr.exclude_hv = 1;
	event->attr.exclude_idle = 1;

	SKIP_IF(require_paranoia_below(1));
	FAIL_IF(event_open_with_cpu(event, cpu));
	FAIL_IF(event_enable(event));

	return 0;
}

int cpu_event_vs_ebb(void)
{
	union pipe read_pipe, write_pipe;
	struct event event;
	int cpu, rc;
	pid_t pid;

	cpu = pick_online_cpu();
	FAIL_IF(cpu < 0);
	FAIL_IF(bind_to_cpu(cpu));

	FAIL_IF(pipe(read_pipe.fds) == -1);
	FAIL_IF(pipe(write_pipe.fds) == -1);

	pid = fork();
	if (pid == 0) {
		/* NB order of pipes looks reversed */
		exit(ebb_child(write_pipe, read_pipe));
	}

	/* We set up the cpu event first */
	rc = setup_cpu_event(&event, cpu);
	if (rc) {
		kill_child_and_wait(pid);
		return rc;
	}

	/* Signal the child to install its EBB event and wait */
	if (sync_with_child(read_pipe, write_pipe))
		/* If it fails, wait for it to exit */
		goto wait;

	/* Signal the child to run */
	FAIL_IF(sync_with_child(read_pipe, write_pipe));

wait:
	/* We expect the child to succeed */
	FAIL_IF(wait_for_child(pid));

	FAIL_IF(event_disable(&event));
	FAIL_IF(event_read(&event));

	event_report(&event);

	/* The cpu event may have run */

	return 0;
}

int main(void)
{
	return test_harness(cpu_event_vs_ebb, "cpu_event_vs_ebb");
}
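/*
 * Not part of the diff above: a sketch of what event_open_with_cpu()
 * plausibly does. Opening a perf event with pid == -1 and cpu >= 0
 * measures all tasks on that CPU, which is what makes it a "cpu event".
 * There is no libc wrapper for perf_event_open(), so it goes through
 * syscall(). The helper name is an assumption; the syscall semantics
 * are the standard perf API.
 */
#include <sys/syscall.h>
#include <linux/perf_event.h>

static long event_open_with_cpu_sketch(struct perf_event_attr *attr, int cpu)
{
	/* pid == -1 with cpu >= 0: count every task running on that cpu */
	return syscall(__NR_perf_event_open, attr, /* pid */ -1, cpu,
		       /* group_fd */ -1, /* flags */ 0UL);
}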
/*
 * Copyright 2014, Michael Ellerman, IBM Corp.
 * Licensed under GPLv2.
 */

#include <stdio.h>
#include <stdlib.h>

#include "ebb.h"

/*
 * Basic test that counts user cycles and takes EBBs.
 */
int cycles(void)
{
	struct event event;

	event_init_named(&event, 0x1001e, "cycles");
	event_leader_ebb_init(&event);

	event.attr.exclude_kernel = 1;
	event.attr.exclude_hv = 1;
	event.attr.exclude_idle = 1;

	FAIL_IF(event_open(&event));

	ebb_enable_pmc_counting(1);
	setup_ebb_handler(standard_ebb_callee);
	ebb_global_enable();
	FAIL_IF(ebb_event_enable(&event));

	mtspr(SPRN_PMC1, pmc_sample_period(sample_period));

	while (ebb_state.stats.ebb_count < 10) {
		FAIL_IF(core_busy_loop());
		FAIL_IF(ebb_check_mmcr0());
	}

	ebb_global_disable();
	ebb_freeze_pmcs();
	count_pmc(1, sample_period);

	dump_ebb_state();

	event_close(&event);

	FAIL_IF(ebb_state.stats.ebb_count == 0);
	FAIL_IF(!ebb_check_count(1, sample_period, 100));

	return 0;
}

int main(void)
{
	return test_harness(cycles, "cycles");
}
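/*
 * Not part of the diff above: what setup_ebb_handler() plausibly boils
 * down to. On POWER8 the Event-Based Branch Handler Register (EBBHR)
 * holds the address the hardware branches to when an EBB fires, so
 * installing a handler is essentially a single mtspr. The real selftest
 * helper also has to cope with function descriptors on big-endian ABIv1;
 * this sketch (an assumption, not the diff's code) elides that.
 */
static void setup_ebb_handler_sketch(void (*handler)(void))
{
	mtspr(SPRN_EBBHR, (unsigned long)handler);
}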
/*
 * Copyright 2014, Michael Ellerman, IBM Corp.
 * Licensed under GPLv2.
 */

#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

#include "ebb.h"

/*
 * Test of counting cycles while using MMCR0_FC (freeze counters) to only count
 * parts of the code. This is complicated by the fact that FC is set by the
 * hardware when the event overflows. We may take the EBB after we have set FC,
 * so we have to be careful about whether we clear FC at the end of the EBB
 * handler or not.
 */
static bool counters_frozen = false;
static int ebbs_while_frozen = 0;

static void ebb_callee(void)
{
	uint64_t mask, val;

	mask = MMCR0_PMAO | MMCR0_FC;

	val = mfspr(SPRN_BESCR);
	if (!(val & BESCR_PMEO)) {
		ebb_state.stats.spurious++;
		goto out;
	}

	ebb_state.stats.ebb_count++;
	trace_log_counter(ebb_state.trace, ebb_state.stats.ebb_count);

	val = mfspr(SPRN_MMCR0);
	trace_log_reg(ebb_state.trace, SPRN_MMCR0, val);

	if (counters_frozen) {
		trace_log_string(ebb_state.trace, "frozen");
		ebbs_while_frozen++;
		/* We took this EBB while frozen, so don't clear FC */
		mask &= ~MMCR0_FC;
	}

	count_pmc(1, sample_period);
out:
	reset_ebb_with_clear_mask(mask);
}

int cycles_with_freeze(void)
{
	struct event event;
	uint64_t val;
	bool fc_cleared;

	event_init_named(&event, 0x1001e, "cycles");
	event_leader_ebb_init(&event);

	event.attr.exclude_kernel = 1;
	event.attr.exclude_hv = 1;
	event.attr.exclude_idle = 1;

	FAIL_IF(event_open(&event));

	setup_ebb_handler(ebb_callee);
	ebb_global_enable();
	FAIL_IF(ebb_event_enable(&event));

	mtspr(SPRN_PMC1, pmc_sample_period(sample_period));

	fc_cleared = false;

	/* Make sure we loop until we take at least one EBB */
	while ((ebb_state.stats.ebb_count < 20 && !fc_cleared) ||
	       ebb_state.stats.ebb_count < 1) {
		counters_frozen = false;
		mb();
		mtspr(SPRN_MMCR0, mfspr(SPRN_MMCR0) & ~MMCR0_FC);

		FAIL_IF(core_busy_loop());

		counters_frozen = true;
		mb();
		mtspr(SPRN_MMCR0, mfspr(SPRN_MMCR0) | MMCR0_FC);

		val = mfspr(SPRN_MMCR0);
		if (!(val & MMCR0_FC)) {
			printf("Outside of loop, FC NOT set MMCR0 0x%lx\n", val);
			fc_cleared = true;
		}
	}

	ebb_global_disable();
	ebb_freeze_pmcs();
	count_pmc(1, sample_period);

	dump_ebb_state();

	printf("EBBs while frozen %d\n", ebbs_while_frozen);

	event_close(&event);

	FAIL_IF(ebb_state.stats.ebb_count == 0);
	FAIL_IF(fc_cleared);

	return 0;
}

int main(void)
{
	return test_harness(cycles_with_freeze, "cycles_with_freeze");
}
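/*
 * Not part of the diff above: a sketch of what reset_ebb_with_clear_mask()
 * plausibly does at the end of the handler - rearm the PMU while clearing
 * only the MMCR0 bits the caller asks for. This is what lets ebb_callee()
 * above keep MMCR0_FC set when an EBB arrives while the counters are meant
 * to be frozen. Register names follow the test code; the exact sequence is
 * an assumption, and the concluding rfebb is left to the handler's exit path.
 */
static void reset_ebb_with_clear_mask_sketch(unsigned long mmcr0_clear_mask)
{
	uint64_t val = mfspr(SPRN_MMCR0);

	/* Clear only the requested bits (e.g. PMAO, and FC if we were not
	 * frozen), and re-enable performance monitor alerts. */
	mtspr(SPRN_MMCR0, (val & ~mmcr0_clear_mask) | MMCR0_PMAE);

	/* Clear the "EBB occurred" bit, then re-enable EBBs */
	mtspr(SPRN_BESCRR, BESCR_PMEO);
	mtspr(SPRN_BESCRS, BESCR_PME);
}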