Commit b5be75d0 authored by Michael Ellerman

Merge remote-tracking branch 'benh/next' into next

Merge updates collected & acked by Ben. A few EEH patches from Gavin,
some mm updates from Aneesh and a few odds and ends.
parents e39f223f d557b098
@@ -39,6 +39,7 @@ struct device_node;
 #define EEH_PROBE_MODE_DEV     0x04    /* From PCI device */
 #define EEH_PROBE_MODE_DEVTREE 0x08    /* From device tree */
 #define EEH_ENABLE_IO_FOR_LOG  0x10    /* Enable IO for log */
+#define EEH_EARLY_DUMP_LOG     0x20    /* Dump log immediately */
 
 /*
  * Delay for PE reset, all in ms
@@ -72,6 +73,7 @@ struct device_node;
 #define EEH_PE_ISOLATED        (1 << 0)    /* Isolated PE */
 #define EEH_PE_RECOVERING      (1 << 1)    /* Recovering PE */
 #define EEH_PE_CFG_BLOCKED     (1 << 2)    /* Block config access */
+#define EEH_PE_RESET           (1 << 3)    /* PE reset in progress */
 
 #define EEH_PE_KEEP            (1 << 8)    /* Keep PE on hotplug */
 #define EEH_PE_CFG_RESTRICTED  (1 << 9)    /* Block config on error */
...
@@ -60,7 +60,7 @@ struct machdep_calls {
     void (*hugepage_invalidate)(unsigned long vsid,
                                 unsigned long addr,
                                 unsigned char *hpte_slot_array,
-                                int psize, int ssize);
+                                int psize, int ssize, int local);
     /* special for kexec, to be called in real mode, linear mapping is
      * destroyed as well */
     void (*hpte_clear_all)(void);
...
@@ -288,62 +288,6 @@ enum OpalMessageType {
     OPAL_MSG_TYPE_MAX,
 };
 
-/* Machine check related definitions */
-enum OpalMCE_Version {
-    OpalMCE_V1 = 1,
-};
-
-enum OpalMCE_Severity {
-    OpalMCE_SEV_NO_ERROR = 0,
-    OpalMCE_SEV_WARNING = 1,
-    OpalMCE_SEV_ERROR_SYNC = 2,
-    OpalMCE_SEV_FATAL = 3,
-};
-
-enum OpalMCE_Disposition {
-    OpalMCE_DISPOSITION_RECOVERED = 0,
-    OpalMCE_DISPOSITION_NOT_RECOVERED = 1,
-};
-
-enum OpalMCE_Initiator {
-    OpalMCE_INITIATOR_UNKNOWN = 0,
-    OpalMCE_INITIATOR_CPU = 1,
-};
-
-enum OpalMCE_ErrorType {
-    OpalMCE_ERROR_TYPE_UNKNOWN = 0,
-    OpalMCE_ERROR_TYPE_UE = 1,
-    OpalMCE_ERROR_TYPE_SLB = 2,
-    OpalMCE_ERROR_TYPE_ERAT = 3,
-    OpalMCE_ERROR_TYPE_TLB = 4,
-};
-
-enum OpalMCE_UeErrorType {
-    OpalMCE_UE_ERROR_INDETERMINATE = 0,
-    OpalMCE_UE_ERROR_IFETCH = 1,
-    OpalMCE_UE_ERROR_PAGE_TABLE_WALK_IFETCH = 2,
-    OpalMCE_UE_ERROR_LOAD_STORE = 3,
-    OpalMCE_UE_ERROR_PAGE_TABLE_WALK_LOAD_STORE = 4,
-};
-
-enum OpalMCE_SlbErrorType {
-    OpalMCE_SLB_ERROR_INDETERMINATE = 0,
-    OpalMCE_SLB_ERROR_PARITY = 1,
-    OpalMCE_SLB_ERROR_MULTIHIT = 2,
-};
-
-enum OpalMCE_EratErrorType {
-    OpalMCE_ERAT_ERROR_INDETERMINATE = 0,
-    OpalMCE_ERAT_ERROR_PARITY = 1,
-    OpalMCE_ERAT_ERROR_MULTIHIT = 2,
-};
-
-enum OpalMCE_TlbErrorType {
-    OpalMCE_TLB_ERROR_INDETERMINATE = 0,
-    OpalMCE_TLB_ERROR_PARITY = 1,
-    OpalMCE_TLB_ERROR_MULTIHIT = 2,
-};
-
 enum OpalThreadStatus {
     OPAL_THREAD_INACTIVE = 0x0,
     OPAL_THREAD_STARTED = 0x1,
@@ -467,54 +411,6 @@ struct opal_ipmi_msg {
     uint8_t data[];
 };
 
-struct opal_machine_check_event {
-    enum OpalMCE_Version     version:8;      /* 0x00 */
-    uint8_t                  in_use;         /* 0x01 */
-    enum OpalMCE_Severity    severity:8;     /* 0x02 */
-    enum OpalMCE_Initiator   initiator:8;    /* 0x03 */
-    enum OpalMCE_ErrorType   error_type:8;   /* 0x04 */
-    enum OpalMCE_Disposition disposition:8;  /* 0x05 */
-    uint8_t                  reserved_1[2];  /* 0x06 */
-    uint64_t                 gpr3;           /* 0x08 */
-    uint64_t                 srr0;           /* 0x10 */
-    uint64_t                 srr1;           /* 0x18 */
-    union {                                  /* 0x20 */
-        struct {
-            enum OpalMCE_UeErrorType ue_error_type:8;
-            uint8_t  effective_address_provided;
-            uint8_t  physical_address_provided;
-            uint8_t  reserved_1[5];
-            uint64_t effective_address;
-            uint64_t physical_address;
-            uint8_t  reserved_2[8];
-        } ue_error;
-
-        struct {
-            enum OpalMCE_SlbErrorType slb_error_type:8;
-            uint8_t  effective_address_provided;
-            uint8_t  reserved_1[6];
-            uint64_t effective_address;
-            uint8_t  reserved_2[16];
-        } slb_error;
-
-        struct {
-            enum OpalMCE_EratErrorType erat_error_type:8;
-            uint8_t  effective_address_provided;
-            uint8_t  reserved_1[6];
-            uint64_t effective_address;
-            uint8_t  reserved_2[16];
-        } erat_error;
-
-        struct {
-            enum OpalMCE_TlbErrorType tlb_error_type:8;
-            uint8_t  effective_address_provided;
-            uint8_t  reserved_1[6];
-            uint64_t effective_address;
-            uint8_t  reserved_2[16];
-        } tlb_error;
-    } u;
-};
-
 /* FSP memory errors handling */
 enum OpalMemErr_Version {
     OpalMemErr_V1 = 1,
...
@@ -42,7 +42,6 @@ extern unsigned int debug_smp_processor_id(void); /* from linux/smp.h */
 #define get_slb_shadow() (get_paca()->slb_shadow_ptr)
 
 struct task_struct;
-struct opal_machine_check_event;
 
 /*
  * Defines the layout of the paca.
@@ -153,12 +152,6 @@ struct paca_struct {
     u64 tm_scratch;                 /* TM scratch area for reclaim */
 #endif
 
-#ifdef CONFIG_PPC_POWERNV
-    /* Pointer to OPAL machine check event structure set by the
-     * early exception handler for use by high level C handler
-     */
-    struct opal_machine_check_event *opal_mc_evt;
-#endif
 #ifdef CONFIG_PPC_BOOK3S_64
     /* Exclusive emergency stack pointer for machine check exception. */
     void *mc_emergency_sp;
...
@@ -127,7 +127,9 @@ static inline void arch_leave_lazy_mmu_mode(void)
 extern void flush_hash_page(unsigned long vpn, real_pte_t pte, int psize,
                             int ssize, int local);
 extern void flush_hash_range(unsigned long number, int local);
+extern void flush_hash_hugepage(unsigned long vsid, unsigned long addr,
+                                pmd_t *pmdp, unsigned int psize, int ssize,
+                                int local);
 
 static inline void local_flush_tlb_mm(struct mm_struct *mm)
 {
...
@@ -726,12 +726,5 @@ int main(void)
                 arch.timing_last_enter.tv32.tbl));
 #endif
 
-#ifdef CONFIG_PPC_POWERNV
-    DEFINE(OPAL_MC_GPR3, offsetof(struct opal_machine_check_event, gpr3));
-    DEFINE(OPAL_MC_SRR0, offsetof(struct opal_machine_check_event, srr0));
-    DEFINE(OPAL_MC_SRR1, offsetof(struct opal_machine_check_event, srr1));
-    DEFINE(PACA_OPAL_MC_EVT, offsetof(struct paca_struct, opal_mc_evt));
-#endif
     return 0;
 }
@@ -143,6 +143,8 @@ static int __init eeh_setup(char *str)
 {
     if (!strcmp(str, "off"))
         eeh_add_flag(EEH_FORCE_DISABLED);
+    else if (!strcmp(str, "early_log"))
+        eeh_add_flag(EEH_EARLY_DUMP_LOG);
 
     return 1;
 }
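
The new `eeh=early_log` boot argument above simply sets another bit in the EEH flag word, which the eeh-ioda hunks further down test with eeh_has_flag() before dumping PHB diagnostics. A minimal standalone sketch of that bit-flag plumbing (simplified stand-ins for the kernel's flag storage and parser, not the actual implementation):

```c
#include <stdio.h>

#define EEH_FORCE_DISABLED 0x02   /* example value for this sketch */
#define EEH_EARLY_DUMP_LOG 0x20   /* matches the eeh.h hunk above */

static int eeh_flags;             /* stand-in for the kernel's flag word */

static void eeh_add_flag(int flag) { eeh_flags |= flag; }
static int  eeh_has_flag(int flag) { return !!(eeh_flags & flag); }

int main(void)
{
    eeh_add_flag(EEH_EARLY_DUMP_LOG);  /* as if booted with eeh=early_log */
    printf("early dump enabled: %d\n", eeh_has_flag(EEH_EARLY_DUMP_LOG));
    return 0;
}
```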
@@ -758,30 +760,41 @@ static void eeh_reset_pe_once(struct eeh_pe *pe)
 int eeh_reset_pe(struct eeh_pe *pe)
 {
     int flags = (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE);
-    int i, rc;
+    int i, state, ret;
+
+    /* Mark as reset and block config space */
+    eeh_pe_state_mark(pe, EEH_PE_RESET | EEH_PE_CFG_BLOCKED);
 
     /* Take three shots at resetting the bus */
-    for (i=0; i<3; i++) {
+    for (i = 0; i < 3; i++) {
         eeh_reset_pe_once(pe);
 
         /*
          * EEH_PE_ISOLATED is expected to be removed after
          * BAR restore.
          */
-        rc = eeh_ops->wait_state(pe, PCI_BUS_RESET_WAIT_MSEC);
-        if ((rc & flags) == flags)
-            return 0;
+        state = eeh_ops->wait_state(pe, PCI_BUS_RESET_WAIT_MSEC);
+        if ((state & flags) == flags) {
+            ret = 0;
+            goto out;
+        }
 
-        if (rc < 0) {
-            pr_err("%s: Unrecoverable slot failure on PHB#%d-PE#%x",
+        if (state < 0) {
+            pr_warn("%s: Unrecoverable slot failure on PHB#%d-PE#%x",
                 __func__, pe->phb->global_number, pe->addr);
-            return -1;
+            ret = -ENOTRECOVERABLE;
+            goto out;
         }
-        pr_err("EEH: bus reset %d failed on PHB#%d-PE#%x, rc=%d\n",
-            i+1, pe->phb->global_number, pe->addr, rc);
+
+        /* We might run out of credits */
+        ret = -EIO;
+        pr_warn("%s: Failure %d resetting PHB#%x-PE#%x\n (%d)\n",
+            __func__, state, pe->phb->global_number, pe->addr, (i + 1));
     }
 
-    return -1;
+out:
+    eeh_pe_state_clear(pe, EEH_PE_RESET | EEH_PE_CFG_BLOCKED);
+    return ret;
 }
 
 /**
@@ -920,11 +933,8 @@ int eeh_init(void)
         pr_warn("%s: Platform EEH operation not found\n",
             __func__);
         return -EEXIST;
-    } else if ((ret = eeh_ops->init())) {
-        pr_warn("%s: Failed to call platform init function (%d)\n",
-            __func__, ret);
+    } else if ((ret = eeh_ops->init()))
         return ret;
-    }
 
     /* Initialize EEH event */
     ret = eeh_event_init();
@@ -1209,6 +1219,7 @@ int eeh_unfreeze_pe(struct eeh_pe *pe, bool sw_state)
 static struct pci_device_id eeh_reset_ids[] = {
     { PCI_DEVICE(0x19a2, 0x0710) },    /* Emulex, BE */
     { PCI_DEVICE(0x10df, 0xe220) },    /* Emulex, Lancer */
+    { PCI_DEVICE(0x14e4, 0x1657) },    /* Broadcom BCM5719 */
     { 0 }
 };
...
@@ -528,13 +528,11 @@ int eeh_pe_reset_and_recover(struct eeh_pe *pe)
     eeh_pe_dev_traverse(pe, eeh_report_error, &result);
 
     /* Issue reset */
-    eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED);
     ret = eeh_reset_pe(pe);
     if (ret) {
-        eeh_pe_state_clear(pe, EEH_PE_RECOVERING | EEH_PE_CFG_BLOCKED);
+        eeh_pe_state_clear(pe, EEH_PE_RECOVERING);
         return ret;
     }
-    eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED);
 
     /* Unfreeze the PE */
     ret = eeh_clear_pe_frozen_state(pe, true);
@@ -601,19 +599,15 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus)
      * config accesses. So we prefer to block them. However, controlled
      * PCI config accesses initiated from EEH itself are allowed.
      */
-    eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED);
     rc = eeh_reset_pe(pe);
-    if (rc) {
-        eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED);
+    if (rc)
         return rc;
-    }
 
     pci_lock_rescan_remove();
 
     /* Restore PE */
     eeh_ops->configure_bridge(pe);
     eeh_pe_restore_bars(pe);
-    eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED);
 
     /* Clear frozen state */
     rc = eeh_clear_pe_frozen_state(pe, false);
...
@@ -1312,23 +1312,6 @@ hmi_exception_after_realmode:
     EXCEPTION_PROLOG_0(PACA_EXGEN)
     b       hmi_exception_hv
 
-#ifdef CONFIG_PPC_POWERNV
-_GLOBAL(opal_mc_secondary_handler)
-    HMT_MEDIUM_PPR_DISCARD
-    SET_SCRATCH0(r13)
-    GET_PACA(r13)
-    clrldi  r3,r3,2
-    tovirt(r3,r3)
-    std     r3,PACA_OPAL_MC_EVT(r13)
-    ld      r13,OPAL_MC_SRR0(r3)
-    mtspr   SPRN_SRR0,r13
-    ld      r13,OPAL_MC_SRR1(r3)
-    mtspr   SPRN_SRR1,r13
-    ld      r3,OPAL_MC_GPR3(r3)
-    GET_SCRATCH0(r13)
-    b       machine_check_pSeries
-#endif /* CONFIG_PPC_POWERNV */
 
 #define MACHINE_CHECK_HANDLER_WINDUP            \
     /* Clear MSR_RI before setting SRR0 and SRR1. */\
...
@@ -294,8 +294,6 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
     DBG_LOW("    update(vpn=%016lx, avpnv=%016lx, group=%lx, newpp=%lx)",
         vpn, want_v & HPTE_V_AVPN, slot, newpp);
 
-    native_lock_hpte(hptep);
-
     hpte_v = be64_to_cpu(hptep->v);
     /*
      * We need to invalidate the TLB always because hpte_remove doesn't do
@@ -307,17 +305,25 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
     if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID)) {
         DBG_LOW(" -> miss\n");
         ret = -1;
-    } else {
-        DBG_LOW(" -> hit\n");
-        /* Update the HPTE */
-        hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) & ~(HPTE_R_PP | HPTE_R_N)) |
-            (newpp & (HPTE_R_PP | HPTE_R_N | HPTE_R_C)));
+    } else {
+        native_lock_hpte(hptep);
+        /* recheck with locks held */
+        hpte_v = be64_to_cpu(hptep->v);
+        if (unlikely(!HPTE_V_COMPARE(hpte_v, want_v) ||
+                     !(hpte_v & HPTE_V_VALID))) {
+            ret = -1;
+        } else {
+            DBG_LOW(" -> hit\n");
+            /* Update the HPTE */
+            hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) &
+                                    ~(HPTE_R_PP | HPTE_R_N)) |
+                                   (newpp & (HPTE_R_PP | HPTE_R_N |
+                                             HPTE_R_C)));
+        }
+        native_unlock_hpte(hptep);
     }
-    native_unlock_hpte(hptep);
 
     /* Ensure it is out of the tlb too. */
     tlbie(vpn, bpsize, apsize, ssize, local);
     return ret;
 }
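
The updatepp rewrite above is a double-checked pattern: the HPTE is first compared without the lock, and only a likely hit takes the lock and re-validates, so the common miss path stays lock-free. A standalone sketch of the same control flow (a toy HPTE struct and a pthread mutex standing in for the kernel's native_lock_hpte()):

```c
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

struct hpte { uint64_t v; uint64_t r; };

static pthread_mutex_t hpte_lock = PTHREAD_MUTEX_INITIALIZER;
#define HPTE_V_VALID 0x1ULL

static int hpte_updatepp(struct hpte *h, uint64_t want_v, uint64_t newpp)
{
    int ret = 0;

    if (h->v != want_v || !(h->v & HPTE_V_VALID))
        return -1;                       /* miss: lock never taken */

    pthread_mutex_lock(&hpte_lock);
    if (h->v != want_v || !(h->v & HPTE_V_VALID))
        ret = -1;                        /* raced: entry changed under us */
    else
        h->r = (h->r & ~0x3ULL) | newpp; /* update protection bits */
    pthread_mutex_unlock(&hpte_lock);
    return ret;
}

int main(void)
{
    struct hpte h = { .v = 0x9ULL | HPTE_V_VALID, .r = 0x100 };
    printf("update: %d, r=%#llx\n",
           hpte_updatepp(&h, h.v, 0x2), (unsigned long long)h.r);
    return 0;
}
```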
@@ -419,7 +425,7 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
 static void native_hugepage_invalidate(unsigned long vsid,
                                        unsigned long addr,
                                        unsigned char *hpte_slot_array,
-                                       int psize, int ssize)
+                                       int psize, int ssize, int local)
 {
     int i;
     struct hash_pte *hptep;
@@ -465,7 +471,7 @@ static void native_hugepage_invalidate(unsigned long vsid,
          * instruction compares entry_VA in tlb with the VA specified
          * here
          */
-        tlbie(vpn, psize, actual_psize, ssize, 0);
+        tlbie(vpn, psize, actual_psize, ssize, local);
     }
     local_irq_restore(flags);
 }
...
@@ -1315,6 +1315,76 @@ void flush_hash_page(unsigned long vpn, real_pte_t pte, int psize, int ssize,
 #endif
 }
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+void flush_hash_hugepage(unsigned long vsid, unsigned long addr,
+                         pmd_t *pmdp, unsigned int psize, int ssize, int local)
+{
+    int i, max_hpte_count, valid;
+    unsigned long s_addr;
+    unsigned char *hpte_slot_array;
+    unsigned long hidx, shift, vpn, hash, slot;
+
+    s_addr = addr & HPAGE_PMD_MASK;
+    hpte_slot_array = get_hpte_slot_array(pmdp);
+    /*
+     * IF we try to do a HUGE PTE update after a withdraw is done.
+     * we will find the below NULL. This happens when we do
+     * split_huge_page_pmd
+     */
+    if (!hpte_slot_array)
+        return;
+
+    if (ppc_md.hugepage_invalidate) {
+        ppc_md.hugepage_invalidate(vsid, s_addr, hpte_slot_array,
+                                   psize, ssize, local);
+        goto tm_abort;
+    }
+    /*
+     * No bluk hpte removal support, invalidate each entry
+     */
+    shift = mmu_psize_defs[psize].shift;
+    max_hpte_count = HPAGE_PMD_SIZE >> shift;
+    for (i = 0; i < max_hpte_count; i++) {
+        /*
+         * 8 bits per each hpte entries
+         * 000| [ secondary group (one bit) | hidx (3 bits) | valid bit]
+         */
+        valid = hpte_valid(hpte_slot_array, i);
+        if (!valid)
+            continue;
+        hidx = hpte_hash_index(hpte_slot_array, i);
+
+        /* get the vpn */
+        addr = s_addr + (i * (1ul << shift));
+        vpn = hpt_vpn(addr, vsid, ssize);
+        hash = hpt_hash(vpn, shift, ssize);
+        if (hidx & _PTEIDX_SECONDARY)
+            hash = ~hash;
+
+        slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
+        slot += hidx & _PTEIDX_GROUP_IX;
+        ppc_md.hpte_invalidate(slot, vpn, psize,
+                               MMU_PAGE_16M, ssize, local);
+    }
+tm_abort:
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+    /* Transactions are not aborted by tlbiel, only tlbie.
+     * Without, syncing a page back to a block device w/ PIO could pick up
+     * transactional data (bad!) so we force an abort here. Before the
+     * sync the page will be made read-only, which will flush_hash_page.
+     * BIG ISSUE here: if the kernel uses a page from userspace without
+     * unmapping it first, it may see the speculated version.
+     */
+    if (local && cpu_has_feature(CPU_FTR_TM) &&
+        current->thread.regs &&
+        MSR_TM_ACTIVE(current->thread.regs->msr)) {
+        tm_enable();
+        tm_abort(TM_CAUSE_TLBI);
+    }
+#endif
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
 void flush_hash_range(unsigned long number, int local)
 {
     if (ppc_md.flush_hash_range)
...
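
The per-entry loop in flush_hash_hugepage() above converts each packed slot-array byte into a global hash-table slot: invert the hash for secondary-group entries, scale by HPTES_PER_GROUP, then add the 3-bit group index. A standalone arithmetic sketch with made-up hash and mask values:

```c
#include <stdio.h>

#define HPTES_PER_GROUP   8
#define _PTEIDX_SECONDARY 0x8   /* bit 3 of hidx: secondary hash group */
#define _PTEIDX_GROUP_IX  0x7   /* bits 0-2 of hidx: index in the group */

int main(void)
{
    unsigned long htab_hash_mask = 0xffff;  /* example: 64K HPTE groups */
    unsigned long hash = 0x12345;           /* pretend hpt_hash() output */
    unsigned long hidx = 0xd;               /* secondary bit set, index 5 */
    unsigned long slot;

    if (hidx & _PTEIDX_SECONDARY)           /* secondary group: invert hash */
        hash = ~hash;
    slot  = (hash & htab_hash_mask) * HPTES_PER_GROUP;
    slot += hidx & _PTEIDX_GROUP_IX;        /* entry within the group */
    printf("global HPTE slot = %lu\n", slot);
    return 0;
}
```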
@@ -18,57 +18,6 @@
 #include <linux/mm.h>
 #include <asm/machdep.h>
 
-static void invalidate_old_hpte(unsigned long vsid, unsigned long addr,
-                                pmd_t *pmdp, unsigned int psize, int ssize)
-{
-    int i, max_hpte_count, valid;
-    unsigned long s_addr;
-    unsigned char *hpte_slot_array;
-    unsigned long hidx, shift, vpn, hash, slot;
-
-    s_addr = addr & HPAGE_PMD_MASK;
-    hpte_slot_array = get_hpte_slot_array(pmdp);
-    /*
-     * IF we try to do a HUGE PTE update after a withdraw is done.
-     * we will find the below NULL. This happens when we do
-     * split_huge_page_pmd
-     */
-    if (!hpte_slot_array)
-        return;
-
-    if (ppc_md.hugepage_invalidate)
-        return ppc_md.hugepage_invalidate(vsid, s_addr, hpte_slot_array,
-                                          psize, ssize);
-    /*
-     * No bluk hpte removal support, invalidate each entry
-     */
-    shift = mmu_psize_defs[psize].shift;
-    max_hpte_count = HPAGE_PMD_SIZE >> shift;
-    for (i = 0; i < max_hpte_count; i++) {
-        /*
-         * 8 bits per each hpte entries
-         * 000| [ secondary group (one bit) | hidx (3 bits) | valid bit]
-         */
-        valid = hpte_valid(hpte_slot_array, i);
-        if (!valid)
-            continue;
-        hidx = hpte_hash_index(hpte_slot_array, i);
-
-        /* get the vpn */
-        addr = s_addr + (i * (1ul << shift));
-        vpn = hpt_vpn(addr, vsid, ssize);
-        hash = hpt_hash(vpn, shift, ssize);
-        if (hidx & _PTEIDX_SECONDARY)
-            hash = ~hash;
-
-        slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
-        slot += hidx & _PTEIDX_GROUP_IX;
-        ppc_md.hpte_invalidate(slot, vpn, psize,
-                               MMU_PAGE_16M, ssize, 0);
-    }
-}
-
 int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
                     pmd_t *pmdp, unsigned long trap, int local, int ssize,
                     unsigned int psize)
@@ -145,7 +94,8 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
          * hash page table entries.
          */
         if ((old_pmd & _PAGE_HASHPTE) && !(old_pmd & _PAGE_COMBO))
-            invalidate_old_hpte(vsid, ea, pmdp, MMU_PAGE_64K, ssize);
+            flush_hash_hugepage(vsid, ea, pmdp, MMU_PAGE_64K,
+                                ssize, local);
     }
 
     valid = hpte_valid(hpte_slot_array, index);
...
@@ -355,6 +355,13 @@ static int __init do_gpage_early_setup(char *param, char *val,
         if (size != 0) {
             if (sscanf(val, "%lu", &npages) <= 0)
                 npages = 0;
+            if (npages > MAX_NUMBER_GPAGES) {
+                pr_warn("MMU: %lu pages requested for page "
+                        "size %llu KB, limiting to "
+                        __stringify(MAX_NUMBER_GPAGES) "\n",
+                        npages, size / 1024);
+                npages = MAX_NUMBER_GPAGES;
+            }
             gpage_npages[shift_to_mmu_psize(__ffs(size))] = npages;
             size = 0;
         }
...
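
The pr_warn() above relies on __stringify() to paste the macro's value into the format string at compile time via adjacent string-literal concatenation. A standalone sketch (the MAX_NUMBER_GPAGES value here is an example; the real limit is defined elsewhere in the powerpc headers):

```c
#include <stdio.h>

/* Two-level expansion, as in the kernel's include/linux/stringify.h. */
#define __stringify_1(x) #x
#define __stringify(x)   __stringify_1(x)

#define MAX_NUMBER_GPAGES 128   /* example value for this sketch */

int main(void)
{
    unsigned long npages = 500, size = 16777216;  /* 16 MB page size */

    if (npages > MAX_NUMBER_GPAGES) {
        /* "128" is concatenated into the format string at compile time */
        printf("MMU: %lu pages requested for page size %lu KB, "
               "limiting to " __stringify(MAX_NUMBER_GPAGES) "\n",
               npages, size / 1024);
        npages = MAX_NUMBER_GPAGES;
    }
    return 0;
}
```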
@@ -739,29 +739,14 @@ void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
 void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
                             pmd_t *pmdp, unsigned long old_pmd)
 {
-    int ssize, i;
-    unsigned long s_addr;
-    int max_hpte_count;
-    unsigned int psize, valid;
-    unsigned char *hpte_slot_array;
-    unsigned long hidx, vpn, vsid, hash, shift, slot;
-
-    /*
-     * Flush all the hptes mapping this hugepage
-     */
-    s_addr = addr & HPAGE_PMD_MASK;
-    hpte_slot_array = get_hpte_slot_array(pmdp);
-    /*
-     * IF we try to do a HUGE PTE update after a withdraw is done.
-     * we will find the below NULL. This happens when we do
-     * split_huge_page_pmd
-     */
-    if (!hpte_slot_array)
-        return;
+    int ssize, local = 0;
+    unsigned int psize;
+    unsigned long vsid;
+    const struct cpumask *tmp;
 
     /* get the base page size,vsid and segment size */
 #ifdef CONFIG_DEBUG_VM
-    psize = get_slice_psize(mm, s_addr);
+    psize = get_slice_psize(mm, addr);
     BUG_ON(psize == MMU_PAGE_16M);
 #endif
     if (old_pmd & _PAGE_COMBO)
@@ -769,46 +754,20 @@ void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
     else
         psize = MMU_PAGE_64K;
 
-    if (!is_kernel_addr(s_addr)) {
-        ssize = user_segment_size(s_addr);
-        vsid = get_vsid(mm->context.id, s_addr, ssize);
+    if (!is_kernel_addr(addr)) {
+        ssize = user_segment_size(addr);
+        vsid = get_vsid(mm->context.id, addr, ssize);
         WARN_ON(vsid == 0);
     } else {
-        vsid = get_kernel_vsid(s_addr, mmu_kernel_ssize);
+        vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
         ssize = mmu_kernel_ssize;
     }
 
-    if (ppc_md.hugepage_invalidate)
-        return ppc_md.hugepage_invalidate(vsid, s_addr,
-                                          hpte_slot_array,
-                                          psize, ssize);
-    /*
-     * No bluk hpte removal support, invalidate each entry
-     */
-    shift = mmu_psize_defs[psize].shift;
-    max_hpte_count = HPAGE_PMD_SIZE >> shift;
-    for (i = 0; i < max_hpte_count; i++) {
-        /*
-         * 8 bits per each hpte entries
-         * 000| [ secondary group (one bit) | hidx (3 bits) | valid bit]
-         */
-        valid = hpte_valid(hpte_slot_array, i);
-        if (!valid)
-            continue;
-        hidx = hpte_hash_index(hpte_slot_array, i);
-
-        /* get the vpn */
-        addr = s_addr + (i * (1ul << shift));
-        vpn = hpt_vpn(addr, vsid, ssize);
-        hash = hpt_hash(vpn, shift, ssize);
-        if (hidx & _PTEIDX_SECONDARY)
-            hash = ~hash;
-
-        slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
-        slot += hidx & _PTEIDX_GROUP_IX;
-        ppc_md.hpte_invalidate(slot, vpn, psize,
-                               MMU_PAGE_16M, ssize, 0);
-    }
+    tmp = cpumask_of(smp_processor_id());
+    if (cpumask_equal(mm_cpumask(mm), tmp))
+        local = 1;
+
+    return flush_hash_hugepage(vsid, addr, pmdp, psize, ssize, local);
 }
 
 static pmd_t pmd_set_protbits(pmd_t pmd, pgprot_t pgprot)
...
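
hpte_do_hugepage_flush() now decides between a local and a broadcast flush by checking whether the mm has only ever run on the current CPU; if so, the cheaper local form (tlbiel) is safe. A standalone sketch with a toy one-word cpumask (real kernel cpumasks are bitmap structs):

```c
#include <stdio.h>

typedef unsigned long cpumask_t;    /* toy stand-in: one bit per CPU */

static cpumask_t cpumask_of(int cpu)               { return 1UL << cpu; }
static int cpumask_equal(cpumask_t a, cpumask_t b) { return a == b; }

int main(void)
{
    int this_cpu = 2;                   /* pretend smp_processor_id() */
    cpumask_t mm_cpumask = 1UL << 2;    /* mm has run on CPU 2 only */
    int local = 0;

    if (cpumask_equal(mm_cpumask, cpumask_of(this_cpu)))
        local = 1;                      /* no other CPU can hold stale TLBs */
    printf("local flush: %d\n", local);
    return 0;
}
```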
@@ -10,7 +10,7 @@
 #include <linux/oprofile.h>
 #include <linux/sched.h>
 #include <asm/processor.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
 #include <asm/compat.h>
 #include <asm/oprofile_impl.h>
 
@@ -105,6 +105,7 @@ void op_powerpc_backtrace(struct pt_regs * const regs, unsigned int depth)
             first_frame = 0;
         }
     } else {
+        pagefault_disable();
 #ifdef CONFIG_PPC64
         if (!is_32bit_task()) {
             while (depth--) {
@@ -113,7 +114,7 @@ void op_powerpc_backtrace(struct pt_regs * const regs, unsigned int depth)
                     break;
                 first_frame = 0;
             }
-
+            pagefault_enable();
             return;
         }
 #endif
@@ -124,5 +125,6 @@ void op_powerpc_backtrace(struct pt_regs * const regs, unsigned int depth)
                 break;
             first_frame = 0;
         }
+        pagefault_enable();
     }
 }
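
The oprofile hunk brackets its user-stack reads with pagefault_disable()/pagefault_enable(), so a missing page makes the copy fail immediately instead of sleeping in the fault handler, which the profiling interrupt context must not do. A standalone sketch of that fail-fast behaviour (toy copy routine standing in for the kernel's inatomic user-copy helpers):

```c
#include <stdio.h>
#include <string.h>

static int pagefaults_allowed = 1;

static void pagefault_disable(void) { pagefaults_allowed = 0; }
static void pagefault_enable(void)  { pagefaults_allowed = 1; }

/* Toy stand-in for __copy_from_user_inatomic(): fails fast when faults
 * are disabled and the "page" is not already resident. */
static int copy_user_word(unsigned long *dst, const unsigned long *src,
                          int page_present)
{
    if (!page_present && !pagefaults_allowed)
        return -1;                  /* would have faulted: bail out */
    memcpy(dst, src, sizeof(*dst));
    return 0;
}

int main(void)
{
    unsigned long frame = 0xdeadbeef, out = 0;

    pagefault_disable();            /* profiling context: must not sleep */
    if (copy_user_word(&out, &frame, /*page_present=*/1) == 0)
        printf("read frame word: %#lx\n", out);
    pagefault_enable();
    return 0;
}
```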
@@ -353,6 +353,9 @@ static int ioda_eeh_get_phb_state(struct eeh_pe *pe)
     } else if (!(pe->state & EEH_PE_ISOLATED)) {
         eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
         ioda_eeh_phb_diag(pe);
+
+        if (eeh_has_flag(EEH_EARLY_DUMP_LOG))
+            pnv_pci_dump_phb_diag_data(pe->phb, pe->data);
     }
 
     return result;
@@ -372,7 +375,7 @@ static int ioda_eeh_get_pe_state(struct eeh_pe *pe)
      * moving forward, we have to return operational
      * state during PE reset.
      */
-    if (pe->state & EEH_PE_CFG_BLOCKED) {
+    if (pe->state & EEH_PE_RESET) {
         result = (EEH_STATE_MMIO_ACTIVE |
                   EEH_STATE_DMA_ACTIVE |
                   EEH_STATE_MMIO_ENABLED |
@@ -451,6 +454,9 @@ static int ioda_eeh_get_pe_state(struct eeh_pe *pe)
         eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
         ioda_eeh_phb_diag(pe);
+
+        if (eeh_has_flag(EEH_EARLY_DUMP_LOG))
+            pnv_pci_dump_phb_diag_data(pe->phb, pe->data);
     }
 
     return result;
@@ -730,6 +736,7 @@ static int ioda_eeh_reset(struct eeh_pe *pe, int option)
 static int ioda_eeh_get_log(struct eeh_pe *pe, int severity,
                             char *drv_log, unsigned long len)
 {
-    pnv_pci_dump_phb_diag_data(pe->phb, pe->data);
+    if (!eeh_has_flag(EEH_EARLY_DUMP_LOG))
+        pnv_pci_dump_phb_diag_data(pe->phb, pe->data);
 
     return 0;
@@ -1086,6 +1093,10 @@ static int ioda_eeh_next_error(struct eeh_pe **pe)
             !((*pe)->state & EEH_PE_ISOLATED)) {
             eeh_pe_state_mark(*pe, EEH_PE_ISOLATED);
             ioda_eeh_phb_diag(*pe);
+
+            if (eeh_has_flag(EEH_EARLY_DUMP_LOG))
+                pnv_pci_dump_phb_diag_data((*pe)->phb,
+                                           (*pe)->data);
         }
 
         /*
...
@@ -50,7 +50,6 @@ static int mc_recoverable_range_len;
 struct device_node *opal_node;
 static DEFINE_SPINLOCK(opal_write_lock);
-extern u64 opal_mc_secondary_handler[];
 static unsigned int *opal_irqs;
 static unsigned int opal_irq_count;
 static ATOMIC_NOTIFIER_HEAD(opal_notifier_head);
...
@@ -442,7 +442,7 @@ static void __pSeries_lpar_hugepage_invalidate(unsigned long *slot,
 static void pSeries_lpar_hugepage_invalidate(unsigned long vsid,
                                              unsigned long addr,
                                              unsigned char *hpte_slot_array,
-                                             int psize, int ssize)
+                                             int psize, int ssize, int local)
 {
     int i, index = 0;
     unsigned long s_addr = addr;
...