Commit f94def76 authored by Linus Torvalds

Merge branch 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc

Pull powerpc fixes from Ben Herrenschmidt:
 "Here are a few more powerpc fixes for 3.14.

  Most of these are also CC'ed to stable and fix bugs in new
  functionality introduced in the last 2 or 3 versions"

* 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc:
  powerpc/powernv: Fix indirect XSCOM unmangling
  powerpc/powernv: Fix opal_xscom_{read,write} prototype
  powerpc/powernv: Refactor PHB diag-data dump
  powerpc/powernv: Dump PHB diag-data immediately
  powerpc: Increase stack redzone for 64-bit userspace to 512 bytes
  powerpc/ftrace: bugfix for test_24bit_addr
  powerpc/crashdump : Fix page frame number check in copy_oldmem_page
  powerpc/le: Ensure that the 'stop-self' RTAS token is handled correctly
parents 86c7654f e0cf9576
@@ -200,10 +200,11 @@ static inline void __user *arch_compat_alloc_user_space(long len)
 
 	/*
 	 * We can't access below the stack pointer in the 32bit ABI and
-	 * can access 288 bytes in the 64bit ABI
+	 * can access 288 bytes in the 64bit big-endian ABI,
+	 * or 512 bytes with the new ELFv2 little-endian ABI.
 	 */
 	if (!is_32bit_task())
-		usp -= 288;
+		usp -= USER_REDZONE_SIZE;
 
 	return (void __user *) (usp - len);
 }
...
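For illustration only: a minimal user-space sketch of the arithmetic the hunk above makes arch_compat_alloc_user_space() perform. The helper name, the example stack pointer and the requested length are made up; only the 512-byte USER_REDZONE_SIZE figure mirrors the kernel define.

#include <stdint.h>
#include <stdio.h>

#define USER_REDZONE_SIZE 512	/* ELFv2 LE redzone; the old ELFv1 BE ABI only guarantees 288 */

/* hypothetical stand-in for the kernel helper: carve a buffer out of the
 * user stack without touching the redzone that leaf functions may use */
static uintptr_t alloc_below_stack(uintptr_t usp, long len)
{
	usp -= USER_REDZONE_SIZE;	/* skip the ABI-reserved redzone first */
	return usp - len;		/* then allocate the requested buffer below it */
}

int main(void)
{
	uintptr_t usp = 0x7fffffffe000ULL;	/* example user stack pointer */

	printf("buffer starts at %#lx\n",
	       (unsigned long)alloc_below_stack(usp, 128));
	return 0;
}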
@@ -816,8 +816,8 @@ int64_t opal_pci_next_error(uint64_t phb_id, uint64_t *first_frozen_pe,
 int64_t opal_pci_poll(uint64_t phb_id);
 int64_t opal_return_cpu(void);
 
-int64_t opal_xscom_read(uint32_t gcid, uint32_t pcb_addr, __be64 *val);
-int64_t opal_xscom_write(uint32_t gcid, uint32_t pcb_addr, uint64_t val);
+int64_t opal_xscom_read(uint32_t gcid, uint64_t pcb_addr, __be64 *val);
+int64_t opal_xscom_write(uint32_t gcid, uint64_t pcb_addr, uint64_t val);
 
 int64_t opal_lpc_write(uint32_t chip_id, enum OpalLPCAddressType addr_type,
 			uint32_t addr, uint32_t data, uint32_t sz);
...
@@ -28,11 +28,23 @@
 
 #ifdef __powerpc64__
 
+/*
+ * Size of redzone that userspace is allowed to use below the stack
+ * pointer. This is 288 in the 64-bit big-endian ELF ABI, and 512 in
+ * the new ELFv2 little-endian ABI, so we allow the larger amount.
+ *
+ * For kernel code we allow a 288-byte redzone, in order to conserve
+ * kernel stack space; gcc currently only uses 288 bytes, and will
+ * hopefully allow explicit control of the redzone size in future.
+ */
+#define USER_REDZONE_SIZE	512
+#define KERNEL_REDZONE_SIZE	288
+
 #define STACK_FRAME_OVERHEAD	112	/* size of minimum stack frame */
 #define STACK_FRAME_LR_SAVE	2	/* Location of LR in stack frame */
 #define STACK_FRAME_REGS_MARKER	ASM_CONST(0x7265677368657265)
 #define STACK_INT_FRAME_SIZE	(sizeof(struct pt_regs) + \
-				 STACK_FRAME_OVERHEAD + 288)
+				 STACK_FRAME_OVERHEAD + KERNEL_REDZONE_SIZE)
 #define STACK_FRAME_MARKER	12
 
 /* Size of dummy stack frame allocated when calling signal handler. */
@@ -41,6 +53,8 @@
 
 #else /* __powerpc64__ */
 
+#define USER_REDZONE_SIZE	0
+#define KERNEL_REDZONE_SIZE	0
 #define STACK_FRAME_OVERHEAD	16	/* size of minimum stack frame */
 #define STACK_FRAME_LR_SAVE	1	/* Location of LR in stack frame */
 #define STACK_FRAME_REGS_MARKER	ASM_CONST(0x72656773)
...
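A rough, self-contained illustration of why the two new defines differ: the kernel's exception frame only has to preserve what the compiler actually uses below r1, and per the comment above gcc currently spills at most 288 bytes, so reserving the full 512 bytes on every kernel stack frame would just waste space. The pt_regs size here is a placeholder, not the real structure size.

#include <stdio.h>

#define STACK_FRAME_OVERHEAD	112	/* minimum ABI stack frame */
#define KERNEL_REDZONE_SIZE	288	/* what gcc may actually use below r1 in the kernel */
#define USER_REDZONE_SIZE	512	/* what userspace must be allowed under ELFv2 LE */
#define SIZEOF_PT_REGS		(44 * 8)	/* placeholder for sizeof(struct pt_regs) */

int main(void)
{
	long frame = SIZEOF_PT_REGS + STACK_FRAME_OVERHEAD + KERNEL_REDZONE_SIZE;

	printf("interrupt frame ~%ld bytes; using the user redzone would add %d more\n",
	       frame, USER_REDZONE_SIZE - KERNEL_REDZONE_SIZE);
	return 0;
}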
@@ -98,17 +98,19 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
 			size_t csize, unsigned long offset, int userbuf)
 {
 	void  *vaddr;
+	phys_addr_t paddr;
 
 	if (!csize)
 		return 0;
 
 	csize = min_t(size_t, csize, PAGE_SIZE);
+	paddr = pfn << PAGE_SHIFT;
 
-	if ((min_low_pfn < pfn) && (pfn < max_pfn)) {
-		vaddr = __va(pfn << PAGE_SHIFT);
+	if (memblock_is_region_memory(paddr, csize)) {
+		vaddr = __va(paddr);
 		csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf);
 	} else {
-		vaddr = __ioremap(pfn << PAGE_SHIFT, PAGE_SIZE, 0);
+		vaddr = __ioremap(paddr, PAGE_SIZE, 0);
 		csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf);
 		iounmap(vaddr);
 	}
...
@@ -74,6 +74,7 @@ ftrace_modify_code(unsigned long ip, unsigned int old, unsigned int new)
  */
 static int test_24bit_addr(unsigned long ip, unsigned long addr)
 {
+	addr = ppc_function_entry((void *)addr);
 
 	/* use the create_branch to verify that this offset can be branched */
 	return create_branch((unsigned int *)ip, addr, 0);
...
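Background for the one-line fix above, as a hedged sketch: on the 64-bit ELFv1 (big-endian) ABI a function symbol resolves to a function descriptor rather than to the code itself, so checking whether a 24-bit relative branch can reach addr only makes sense after resolving the real entry point, which is what ppc_function_entry() provides. The descriptor layout and helpers below are illustrative stand-ins, not the kernel's definitions.

#include <stdint.h>

/* 64-bit ELFv1 function descriptor layout (illustrative) */
struct func_desc {
	uintptr_t entry;	/* address of the first instruction */
	uintptr_t toc;		/* TOC pointer loaded into r2 */
	uintptr_t env;
};

/* stand-in for the kernel's ppc_function_entry() on an ELFv1 build */
uintptr_t function_entry(const void *func)
{
	return ((const struct func_desc *)func)->entry;
}

/* a 24-bit I-form branch covers a signed, word-aligned +/-32 MB window */
int branch_reachable(uintptr_t ip, uintptr_t target)
{
	intptr_t off = (intptr_t)(target - ip);

	return (off & 3) == 0 && off >= -0x2000000 && off < 0x2000000;
}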
@@ -65,8 +65,8 @@ struct rt_sigframe {
 	struct siginfo __user *pinfo;
 	void __user *puc;
 	struct siginfo info;
-	/* 64 bit ABI allows for 288 bytes below sp before decrementing it. */
-	char abigap[288];
+	/* New 64 bit little-endian ABI allows redzone of 512 bytes below sp */
+	char abigap[USER_REDZONE_SIZE];
 } __attribute__ ((aligned (16)));
 
 static const char fmt32[] = KERN_INFO \
...
@@ -114,6 +114,7 @@ DEFINE_SIMPLE_ATTRIBUTE(ioda_eeh_inbB_dbgfs_ops, ioda_eeh_inbB_dbgfs_get,
 			ioda_eeh_inbB_dbgfs_set, "0x%llx\n");
 #endif /* CONFIG_DEBUG_FS */
 
+
 /**
  * ioda_eeh_post_init - Chip dependent post initialization
  * @hose: PCI controller
@@ -221,6 +222,22 @@ static int ioda_eeh_set_option(struct eeh_pe *pe, int option)
 	return ret;
 }
 
+static void ioda_eeh_phb_diag(struct pci_controller *hose)
+{
+	struct pnv_phb *phb = hose->private_data;
+	long rc;
+
+	rc = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag.blob,
+					 PNV_PCI_DIAG_BUF_SIZE);
+	if (rc != OPAL_SUCCESS) {
+		pr_warning("%s: Failed to get diag-data for PHB#%x (%ld)\n",
+			   __func__, hose->global_number, rc);
+		return;
+	}
+
+	pnv_pci_dump_phb_diag_data(hose, phb->diag.blob);
+}
+
 /**
  * ioda_eeh_get_state - Retrieve the state of PE
  * @pe: EEH PE
@@ -272,6 +289,9 @@ static int ioda_eeh_get_state(struct eeh_pe *pe)
 			result |= EEH_STATE_DMA_ACTIVE;
 			result |= EEH_STATE_MMIO_ENABLED;
 			result |= EEH_STATE_DMA_ENABLED;
+		} else if (!(pe->state & EEH_PE_ISOLATED)) {
+			eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
+			ioda_eeh_phb_diag(hose);
 		}
 
 		return result;
@@ -315,6 +335,15 @@ static int ioda_eeh_get_state(struct eeh_pe *pe)
 			   __func__, fstate, hose->global_number, pe_no);
 	}
 
+	/* Dump PHB diag-data for frozen PE */
+	if (result != EEH_STATE_NOT_SUPPORT &&
+	    (result & (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE)) !=
+	    (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE) &&
+	    !(pe->state & EEH_PE_ISOLATED)) {
+		eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
+		ioda_eeh_phb_diag(hose);
+	}
+
 	return result;
 }
@@ -529,42 +558,6 @@ static int ioda_eeh_reset(struct eeh_pe *pe, int option)
 	return ret;
 }
 
-/**
- * ioda_eeh_get_log - Retrieve error log
- * @pe: EEH PE
- * @severity: Severity level of the log
- * @drv_log: buffer to store the log
- * @len: space of the log buffer
- *
- * The function is used to retrieve error log from P7IOC.
- */
-static int ioda_eeh_get_log(struct eeh_pe *pe, int severity,
-			    char *drv_log, unsigned long len)
-{
-	s64 ret;
-	unsigned long flags;
-	struct pci_controller *hose = pe->phb;
-	struct pnv_phb *phb = hose->private_data;
-
-	spin_lock_irqsave(&phb->lock, flags);
-
-	ret = opal_pci_get_phb_diag_data2(phb->opal_id,
-			phb->diag.blob, PNV_PCI_DIAG_BUF_SIZE);
-	if (ret) {
-		spin_unlock_irqrestore(&phb->lock, flags);
-		pr_warning("%s: Can't get log for PHB#%x-PE#%x (%lld)\n",
-			   __func__, hose->global_number, pe->addr, ret);
-		return -EIO;
-	}
-
-	/* The PHB diag-data is always indicative */
-	pnv_pci_dump_phb_diag_data(hose, phb->diag.blob);
-
-	spin_unlock_irqrestore(&phb->lock, flags);
-
-	return 0;
-}
-
 /**
  * ioda_eeh_configure_bridge - Configure the PCI bridges for the indicated PE
  * @pe: EEH PE
@@ -646,22 +639,6 @@ static void ioda_eeh_hub_diag(struct pci_controller *hose)
 	}
 }
 
-static void ioda_eeh_phb_diag(struct pci_controller *hose)
-{
-	struct pnv_phb *phb = hose->private_data;
-	long rc;
-
-	rc = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag.blob,
-					 PNV_PCI_DIAG_BUF_SIZE);
-	if (rc != OPAL_SUCCESS) {
-		pr_warning("%s: Failed to get diag-data for PHB#%x (%ld)\n",
-			   __func__, hose->global_number, rc);
-		return;
-	}
-
-	pnv_pci_dump_phb_diag_data(hose, phb->diag.blob);
-}
-
 static int ioda_eeh_get_phb_pe(struct pci_controller *hose,
 			       struct eeh_pe **pe)
 {
@@ -834,6 +811,20 @@ static int ioda_eeh_next_error(struct eeh_pe **pe)
 				__func__, err_type);
 		}
 
+		/*
+		 * EEH core will try recover from fenced PHB or
+		 * frozen PE. In the time for frozen PE, EEH core
+		 * enable IO path for that before collecting logs,
+		 * but it ruins the site. So we have to dump the
+		 * log in advance here.
+		 */
+		if ((ret == EEH_NEXT_ERR_FROZEN_PE ||
+		    ret == EEH_NEXT_ERR_FENCED_PHB) &&
+		    !((*pe)->state & EEH_PE_ISOLATED)) {
+			eeh_pe_state_mark(*pe, EEH_PE_ISOLATED);
+			ioda_eeh_phb_diag(hose);
+		}
+
 		/*
 		 * If we have no errors on the specific PHB or only
 		 * informative error there, we continue poking it.
@@ -852,7 +843,6 @@ struct pnv_eeh_ops ioda_eeh_ops = {
 	.set_option		= ioda_eeh_set_option,
 	.get_state		= ioda_eeh_get_state,
 	.reset			= ioda_eeh_reset,
-	.get_log		= ioda_eeh_get_log,
 	.configure_bridge	= ioda_eeh_configure_bridge,
 	.next_error		= ioda_eeh_next_error
 };
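The comment in the next_error hunk is the key design point: recovery re-enables the IO path, and doing so clobbers the registers the diag-data is built from, so the backend captures the log first and uses EEH_PE_ISOLATED to do it exactly once per freeze. A simplified, non-kernel sketch of that ordering, with invented types:

/* illustrative types only; not the kernel's EEH structures */
struct pe {
	unsigned int state;
};
#define PE_ISOLATED	0x1

static void dump_phb_diag(struct pe *pe)
{
	/* capture the PHB error registers while they still hold the event */
}

static void enable_io_path(struct pe *pe)
{
	/* recovery step: clears the frozen state, and with it the log source */
}

static void handle_frozen_pe(struct pe *pe)
{
	if (!(pe->state & PE_ISOLATED)) {
		pe->state |= PE_ISOLATED;	/* dump at most once per freeze */
		dump_phb_diag(pe);		/* must happen before recovery */
	}
	enable_io_path(pe);
}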
@@ -71,11 +71,11 @@ static int opal_xscom_err_xlate(int64_t rc)
 	}
 }
 
-static u64 opal_scom_unmangle(u64 reg)
+static u64 opal_scom_unmangle(u64 addr)
 {
 	/*
 	 * XSCOM indirect addresses have the top bit set. Additionally
-	 * the reset of the top 3 nibbles is always 0.
+	 * the rest of the top 3 nibbles is always 0.
 	 *
 	 * Because the debugfs interface uses signed offsets and shifts
 	 * the address left by 3, we basically cannot use the top 4 bits
@@ -86,10 +86,13 @@ static u64 opal_scom_unmangle(u64 reg)
 	 * conversion here. To leave room for further xscom address
 	 * expansion, we only clear out the top byte
 	 *
+	 * For in-kernel use, we also support the real indirect bit, so
+	 * we test for any of the top 5 bits
+	 *
 	 */
-	if (reg & (1ull << 59))
-		reg = (reg & ~(0xffull << 56)) | (1ull << 63);
-	return reg;
+	if (addr & (0x1full << 59))
+		addr = (addr & ~(0xffull << 56)) | (1ull << 63);
+	return addr;
 }
 
 static int opal_scom_read(scom_map_t map, u64 reg, u64 *value)
@@ -98,8 +101,8 @@ static int opal_scom_read(scom_map_t map, u64 reg, u64 *value)
 	int64_t rc;
 	__be64 v;
 
-	reg = opal_scom_unmangle(reg);
-	rc = opal_xscom_read(m->chip, m->addr + reg, (__be64 *)__pa(&v));
+	reg = opal_scom_unmangle(m->addr + reg);
+	rc = opal_xscom_read(m->chip, reg, (__be64 *)__pa(&v));
 	*value = be64_to_cpu(v);
 	return opal_xscom_err_xlate(rc);
 }
@@ -109,8 +112,8 @@ static int opal_scom_write(scom_map_t map, u64 reg, u64 value)
 	struct opal_scom_map *m = map;
 	int64_t rc;
 
-	reg = opal_scom_unmangle(reg);
-	rc = opal_xscom_write(m->chip, m->addr + reg, value);
+	reg = opal_scom_unmangle(m->addr + reg);
+	rc = opal_xscom_write(m->chip, reg, value);
 	return opal_xscom_err_xlate(rc);
 }
...
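A standalone worked example of the unmangling above, and of why the fix also feeds the full address (m->addr + reg) through it: previously only the low offset was unmangled, so an indirect flag carried in the map base address was never translated. The input values below are arbitrary test data.

#include <stdint.h>
#include <stdio.h>

/* mirrors opal_scom_unmangle() after the fix */
static uint64_t scom_unmangle(uint64_t addr)
{
	if (addr & (0x1full << 59))
		addr = (addr & ~(0xffull << 56)) | (1ull << 63);
	return addr;
}

int main(void)
{
	uint64_t base = 1ull << 59;	/* debugfs-mangled indirect form */
	uint64_t reg  = 0x38000;	/* arbitrary register offset */

	/* new behaviour: unmangle the full address, base included */
	printf("%#llx -> %#llx\n",
	       (unsigned long long)(base + reg),
	       (unsigned long long)scom_unmangle(base + reg));
	/* prints 0x800000000038000 -> 0x8000000000038000 */
	return 0;
}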
@@ -134,57 +134,72 @@ static void pnv_pci_dump_p7ioc_diag_data(struct pci_controller *hose,
 	pr_info("P7IOC PHB#%d Diag-data (Version: %d)\n\n",
 		hose->global_number, common->version);
 
-	pr_info("  brdgCtl:              %08x\n", data->brdgCtl);
-
-	pr_info("  portStatusReg:        %08x\n", data->portStatusReg);
-	pr_info("  rootCmplxStatus:      %08x\n", data->rootCmplxStatus);
-	pr_info("  busAgentStatus:       %08x\n", data->busAgentStatus);
-
-	pr_info("  deviceStatus:         %08x\n", data->deviceStatus);
-	pr_info("  slotStatus:           %08x\n", data->slotStatus);
-	pr_info("  linkStatus:           %08x\n", data->linkStatus);
-	pr_info("  devCmdStatus:         %08x\n", data->devCmdStatus);
-	pr_info("  devSecStatus:         %08x\n", data->devSecStatus);
-
-	pr_info("  rootErrorStatus:      %08x\n", data->rootErrorStatus);
-	pr_info("  uncorrErrorStatus:    %08x\n", data->uncorrErrorStatus);
-	pr_info("  corrErrorStatus:      %08x\n", data->corrErrorStatus);
-	pr_info("  tlpHdr1:              %08x\n", data->tlpHdr1);
-	pr_info("  tlpHdr2:              %08x\n", data->tlpHdr2);
-	pr_info("  tlpHdr3:              %08x\n", data->tlpHdr3);
-	pr_info("  tlpHdr4:              %08x\n", data->tlpHdr4);
-	pr_info("  sourceId:             %08x\n", data->sourceId);
-	pr_info("  errorClass:           %016llx\n", data->errorClass);
-	pr_info("  correlator:           %016llx\n", data->correlator);
-	pr_info("  p7iocPlssr:           %016llx\n", data->p7iocPlssr);
-	pr_info("  p7iocCsr:             %016llx\n", data->p7iocCsr);
-	pr_info("  lemFir:               %016llx\n", data->lemFir);
-	pr_info("  lemErrorMask:         %016llx\n", data->lemErrorMask);
-	pr_info("  lemWOF:               %016llx\n", data->lemWOF);
-	pr_info("  phbErrorStatus:       %016llx\n", data->phbErrorStatus);
-	pr_info("  phbFirstErrorStatus:  %016llx\n", data->phbFirstErrorStatus);
-	pr_info("  phbErrorLog0:         %016llx\n", data->phbErrorLog0);
-	pr_info("  phbErrorLog1:         %016llx\n", data->phbErrorLog1);
-	pr_info("  mmioErrorStatus:      %016llx\n", data->mmioErrorStatus);
-	pr_info("  mmioFirstErrorStatus: %016llx\n", data->mmioFirstErrorStatus);
-	pr_info("  mmioErrorLog0:        %016llx\n", data->mmioErrorLog0);
-	pr_info("  mmioErrorLog1:        %016llx\n", data->mmioErrorLog1);
-	pr_info("  dma0ErrorStatus:      %016llx\n", data->dma0ErrorStatus);
-	pr_info("  dma0FirstErrorStatus: %016llx\n", data->dma0FirstErrorStatus);
-	pr_info("  dma0ErrorLog0:        %016llx\n", data->dma0ErrorLog0);
-	pr_info("  dma0ErrorLog1:        %016llx\n", data->dma0ErrorLog1);
-	pr_info("  dma1ErrorStatus:      %016llx\n", data->dma1ErrorStatus);
-	pr_info("  dma1FirstErrorStatus: %016llx\n", data->dma1FirstErrorStatus);
-	pr_info("  dma1ErrorLog0:        %016llx\n", data->dma1ErrorLog0);
-	pr_info("  dma1ErrorLog1:        %016llx\n", data->dma1ErrorLog1);
+	if (data->brdgCtl)
+		pr_info("  brdgCtl:     %08x\n",
+			data->brdgCtl);
+	if (data->portStatusReg || data->rootCmplxStatus ||
+	    data->busAgentStatus)
+		pr_info("  UtlSts:      %08x %08x %08x\n",
+			data->portStatusReg, data->rootCmplxStatus,
+			data->busAgentStatus);
+	if (data->deviceStatus || data->slotStatus ||
+	    data->linkStatus || data->devCmdStatus ||
+	    data->devSecStatus)
+		pr_info("  RootSts:     %08x %08x %08x %08x %08x\n",
+			data->deviceStatus, data->slotStatus,
+			data->linkStatus, data->devCmdStatus,
+			data->devSecStatus);
+	if (data->rootErrorStatus || data->uncorrErrorStatus ||
+	    data->corrErrorStatus)
+		pr_info("  RootErrSts:  %08x %08x %08x\n",
+			data->rootErrorStatus, data->uncorrErrorStatus,
+			data->corrErrorStatus);
+	if (data->tlpHdr1 || data->tlpHdr2 ||
+	    data->tlpHdr3 || data->tlpHdr4)
+		pr_info("  RootErrLog:  %08x %08x %08x %08x\n",
+			data->tlpHdr1, data->tlpHdr2,
+			data->tlpHdr3, data->tlpHdr4);
+	if (data->sourceId || data->errorClass ||
+	    data->correlator)
+		pr_info("  RootErrLog1: %08x %016llx %016llx\n",
+			data->sourceId, data->errorClass,
+			data->correlator);
+	if (data->p7iocPlssr || data->p7iocCsr)
+		pr_info("  PhbSts:      %016llx %016llx\n",
+			data->p7iocPlssr, data->p7iocCsr);
+	if (data->lemFir || data->lemErrorMask ||
+	    data->lemWOF)
+		pr_info("  Lem:         %016llx %016llx %016llx\n",
+			data->lemFir, data->lemErrorMask,
+			data->lemWOF);
+	if (data->phbErrorStatus || data->phbFirstErrorStatus ||
+	    data->phbErrorLog0 || data->phbErrorLog1)
+		pr_info("  PhbErr:      %016llx %016llx %016llx %016llx\n",
+			data->phbErrorStatus, data->phbFirstErrorStatus,
+			data->phbErrorLog0, data->phbErrorLog1);
+	if (data->mmioErrorStatus || data->mmioFirstErrorStatus ||
+	    data->mmioErrorLog0 || data->mmioErrorLog1)
+		pr_info("  OutErr:      %016llx %016llx %016llx %016llx\n",
+			data->mmioErrorStatus, data->mmioFirstErrorStatus,
+			data->mmioErrorLog0, data->mmioErrorLog1);
+	if (data->dma0ErrorStatus || data->dma0FirstErrorStatus ||
+	    data->dma0ErrorLog0 || data->dma0ErrorLog1)
+		pr_info("  InAErr:      %016llx %016llx %016llx %016llx\n",
+			data->dma0ErrorStatus, data->dma0FirstErrorStatus,
+			data->dma0ErrorLog0, data->dma0ErrorLog1);
+	if (data->dma1ErrorStatus || data->dma1FirstErrorStatus ||
+	    data->dma1ErrorLog0 || data->dma1ErrorLog1)
+		pr_info("  InBErr:      %016llx %016llx %016llx %016llx\n",
+			data->dma1ErrorStatus, data->dma1FirstErrorStatus,
+			data->dma1ErrorLog0, data->dma1ErrorLog1);
 
 	for (i = 0; i < OPAL_P7IOC_NUM_PEST_REGS; i++) {
 		if ((data->pestA[i] >> 63) == 0 &&
 		    (data->pestB[i] >> 63) == 0)
 			continue;
 
-		pr_info("  PE[%3d] PESTA:        %016llx\n", i, data->pestA[i]);
-		pr_info("          PESTB:        %016llx\n", data->pestB[i]);
+		pr_info("  PE[%3d] A/B: %016llx %016llx\n",
+			i, data->pestA[i], data->pestB[i]);
 	}
 }
@@ -197,62 +212,77 @@ static void pnv_pci_dump_phb3_diag_data(struct pci_controller *hose,
 	data = (struct OpalIoPhb3ErrorData*)common;
 	pr_info("PHB3 PHB#%d Diag-data (Version: %d)\n\n",
 		hose->global_number, common->version);
 
-	pr_info("  brdgCtl:              %08x\n", data->brdgCtl);
-
-	pr_info("  portStatusReg:        %08x\n", data->portStatusReg);
-	pr_info("  rootCmplxStatus:      %08x\n", data->rootCmplxStatus);
-	pr_info("  busAgentStatus:       %08x\n", data->busAgentStatus);
-
-	pr_info("  deviceStatus:         %08x\n", data->deviceStatus);
-	pr_info("  slotStatus:           %08x\n", data->slotStatus);
-	pr_info("  linkStatus:           %08x\n", data->linkStatus);
-	pr_info("  devCmdStatus:         %08x\n", data->devCmdStatus);
-	pr_info("  devSecStatus:         %08x\n", data->devSecStatus);
-
-	pr_info("  rootErrorStatus:      %08x\n", data->rootErrorStatus);
-	pr_info("  uncorrErrorStatus:    %08x\n", data->uncorrErrorStatus);
-	pr_info("  corrErrorStatus:      %08x\n", data->corrErrorStatus);
-	pr_info("  tlpHdr1:              %08x\n", data->tlpHdr1);
-	pr_info("  tlpHdr2:              %08x\n", data->tlpHdr2);
-	pr_info("  tlpHdr3:              %08x\n", data->tlpHdr3);
-	pr_info("  tlpHdr4:              %08x\n", data->tlpHdr4);
-	pr_info("  sourceId:             %08x\n", data->sourceId);
-	pr_info("  errorClass:           %016llx\n", data->errorClass);
-	pr_info("  correlator:           %016llx\n", data->correlator);
-
-	pr_info("  nFir:                 %016llx\n", data->nFir);
-	pr_info("  nFirMask:             %016llx\n", data->nFirMask);
-	pr_info("  nFirWOF:              %016llx\n", data->nFirWOF);
-	pr_info("  PhbPlssr:             %016llx\n", data->phbPlssr);
-	pr_info("  PhbCsr:               %016llx\n", data->phbCsr);
-	pr_info("  lemFir:               %016llx\n", data->lemFir);
-	pr_info("  lemErrorMask:         %016llx\n", data->lemErrorMask);
-	pr_info("  lemWOF:               %016llx\n", data->lemWOF);
-	pr_info("  phbErrorStatus:       %016llx\n", data->phbErrorStatus);
-	pr_info("  phbFirstErrorStatus:  %016llx\n", data->phbFirstErrorStatus);
-	pr_info("  phbErrorLog0:         %016llx\n", data->phbErrorLog0);
-	pr_info("  phbErrorLog1:         %016llx\n", data->phbErrorLog1);
-	pr_info("  mmioErrorStatus:      %016llx\n", data->mmioErrorStatus);
-	pr_info("  mmioFirstErrorStatus: %016llx\n", data->mmioFirstErrorStatus);
-	pr_info("  mmioErrorLog0:        %016llx\n", data->mmioErrorLog0);
-	pr_info("  mmioErrorLog1:        %016llx\n", data->mmioErrorLog1);
-	pr_info("  dma0ErrorStatus:      %016llx\n", data->dma0ErrorStatus);
-	pr_info("  dma0FirstErrorStatus: %016llx\n", data->dma0FirstErrorStatus);
-	pr_info("  dma0ErrorLog0:        %016llx\n", data->dma0ErrorLog0);
-	pr_info("  dma0ErrorLog1:        %016llx\n", data->dma0ErrorLog1);
-	pr_info("  dma1ErrorStatus:      %016llx\n", data->dma1ErrorStatus);
-	pr_info("  dma1FirstErrorStatus: %016llx\n", data->dma1FirstErrorStatus);
-	pr_info("  dma1ErrorLog0:        %016llx\n", data->dma1ErrorLog0);
-	pr_info("  dma1ErrorLog1:        %016llx\n", data->dma1ErrorLog1);
+	if (data->brdgCtl)
+		pr_info("  brdgCtl:     %08x\n",
+			data->brdgCtl);
+	if (data->portStatusReg || data->rootCmplxStatus ||
+	    data->busAgentStatus)
+		pr_info("  UtlSts:      %08x %08x %08x\n",
+			data->portStatusReg, data->rootCmplxStatus,
+			data->busAgentStatus);
+	if (data->deviceStatus || data->slotStatus ||
+	    data->linkStatus || data->devCmdStatus ||
+	    data->devSecStatus)
+		pr_info("  RootSts:     %08x %08x %08x %08x %08x\n",
+			data->deviceStatus, data->slotStatus,
+			data->linkStatus, data->devCmdStatus,
+			data->devSecStatus);
+	if (data->rootErrorStatus || data->uncorrErrorStatus ||
+	    data->corrErrorStatus)
+		pr_info("  RootErrSts:  %08x %08x %08x\n",
+			data->rootErrorStatus, data->uncorrErrorStatus,
+			data->corrErrorStatus);
+	if (data->tlpHdr1 || data->tlpHdr2 ||
+	    data->tlpHdr3 || data->tlpHdr4)
+		pr_info("  RootErrLog:  %08x %08x %08x %08x\n",
+			data->tlpHdr1, data->tlpHdr2,
+			data->tlpHdr3, data->tlpHdr4);
+	if (data->sourceId || data->errorClass ||
+	    data->correlator)
+		pr_info("  RootErrLog1: %08x %016llx %016llx\n",
+			data->sourceId, data->errorClass,
+			data->correlator);
+	if (data->nFir || data->nFirMask ||
+	    data->nFirWOF)
+		pr_info("  nFir:        %016llx %016llx %016llx\n",
+			data->nFir, data->nFirMask,
+			data->nFirWOF);
+	if (data->phbPlssr || data->phbCsr)
+		pr_info("  PhbSts:      %016llx %016llx\n",
+			data->phbPlssr, data->phbCsr);
+	if (data->lemFir || data->lemErrorMask ||
+	    data->lemWOF)
+		pr_info("  Lem:         %016llx %016llx %016llx\n",
+			data->lemFir, data->lemErrorMask,
+			data->lemWOF);
+	if (data->phbErrorStatus || data->phbFirstErrorStatus ||
+	    data->phbErrorLog0 || data->phbErrorLog1)
+		pr_info("  PhbErr:      %016llx %016llx %016llx %016llx\n",
+			data->phbErrorStatus, data->phbFirstErrorStatus,
+			data->phbErrorLog0, data->phbErrorLog1);
+	if (data->mmioErrorStatus || data->mmioFirstErrorStatus ||
+	    data->mmioErrorLog0 || data->mmioErrorLog1)
+		pr_info("  OutErr:      %016llx %016llx %016llx %016llx\n",
+			data->mmioErrorStatus, data->mmioFirstErrorStatus,
+			data->mmioErrorLog0, data->mmioErrorLog1);
+	if (data->dma0ErrorStatus || data->dma0FirstErrorStatus ||
+	    data->dma0ErrorLog0 || data->dma0ErrorLog1)
+		pr_info("  InAErr:      %016llx %016llx %016llx %016llx\n",
+			data->dma0ErrorStatus, data->dma0FirstErrorStatus,
+			data->dma0ErrorLog0, data->dma0ErrorLog1);
+	if (data->dma1ErrorStatus || data->dma1FirstErrorStatus ||
+	    data->dma1ErrorLog0 || data->dma1ErrorLog1)
+		pr_info("  InBErr:      %016llx %016llx %016llx %016llx\n",
+			data->dma1ErrorStatus, data->dma1FirstErrorStatus,
+			data->dma1ErrorLog0, data->dma1ErrorLog1);
 
 	for (i = 0; i < OPAL_PHB3_NUM_PEST_REGS; i++) {
 		if ((data->pestA[i] >> 63) == 0 &&
 		    (data->pestB[i] >> 63) == 0)
 			continue;
 
-		pr_info("  PE[%3d] PESTA:        %016llx\n", i, data->pestA[i]);
-		pr_info("          PESTB:        %016llx\n", data->pestB[i]);
+		pr_info("  PE[%3d] A/B: %016llx %016llx\n",
+			i, data->pestA[i], data->pestB[i]);
 	}
 }
...
@@ -35,12 +35,7 @@
 #include "offline_states.h"
 
 /* This version can't take the spinlock, because it never returns */
-static struct rtas_args rtas_stop_self_args = {
-	.token = RTAS_UNKNOWN_SERVICE,
-	.nargs = 0,
-	.nret = 1,
-	.rets = &rtas_stop_self_args.args[0],
-};
+static int rtas_stop_self_token = RTAS_UNKNOWN_SERVICE;
 
 static DEFINE_PER_CPU(enum cpu_state_vals, preferred_offline_state) =
 							CPU_STATE_OFFLINE;
@@ -93,15 +88,20 @@ void set_default_offline_state(int cpu)
 static void rtas_stop_self(void)
 {
-	struct rtas_args *args = &rtas_stop_self_args;
+	struct rtas_args args = {
+		.token = cpu_to_be32(rtas_stop_self_token),
+		.nargs = 0,
+		.nret = 1,
+		.rets = &args.args[0],
+	};
+
 	local_irq_disable();
 
-	BUG_ON(args->token == RTAS_UNKNOWN_SERVICE);
+	BUG_ON(rtas_stop_self_token == RTAS_UNKNOWN_SERVICE);
 
 	printk("cpu %u (hwid %u) Ready to die...\n",
 	       smp_processor_id(), hard_smp_processor_id());
-	enter_rtas(__pa(args));
+	enter_rtas(__pa(&args));
 
 	panic("Alas, I survived.\n");
 }
@@ -392,10 +392,10 @@ static int __init pseries_cpu_hotplug_init(void)
 		}
 	}
 
-	rtas_stop_self_args.token = rtas_token("stop-self");
+	rtas_stop_self_token = rtas_token("stop-self");
 	qcss_tok = rtas_token("query-cpu-stopped-state");
 
-	if (rtas_stop_self_args.token == RTAS_UNKNOWN_SERVICE ||
+	if (rtas_stop_self_token == RTAS_UNKNOWN_SERVICE ||
 			qcss_tok == RTAS_UNKNOWN_SERVICE) {
 		printk(KERN_INFO "CPU Hotplug not supported by firmware "
 				"- disabling.\n");
...
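Two things change in the hunk above: the argument block moves onto the stack (so the self-referential .rets pointer is valid for the CPU actually making the call) and the token is stored with cpu_to_be32(), because RTAS consumes its argument block in big-endian regardless of the kernel's endianness. A standalone sketch of just the byte-order half; the struct layout and token value are simplified stand-ins, not the kernel's rtas_args.

#include <stdint.h>
#include <stdio.h>

static uint32_t to_be32(uint32_t x)
{
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	return __builtin_bswap32(x);	/* LE host: swap, as cpu_to_be32() would */
#else
	return x;			/* BE host: already in firmware byte order */
#endif
}

struct rtas_args_sketch {
	uint32_t token;			/* firmware reads this as big-endian */
	uint32_t nargs;
	uint32_t nret;
	uint32_t args[16];
};

int main(void)
{
	uint32_t stop_self_token = 0x2e;	/* made-up value; really comes from rtas_token("stop-self") */
	struct rtas_args_sketch args = {
		.token = to_be32(stop_self_token),
		.nargs = 0,
		.nret  = 1,
	};

	printf("cpu value %#x stored for firmware as %#x\n",
	       stop_self_token, args.token);
	return 0;
}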