Commit 8766dc68 authored by Linus Torvalds

Merge tag 'powerpc-4.8-3' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux

Pull powerpc fixes from Michael Ellerman:
 "Some powerpc fixes for 4.8:

  Misc:
   - powerpc/vdso: Fix build rules to rebuild vdsos correctly from Nicholas Piggin
   - powerpc/ptrace: Fix coredump since ptrace TM changes from Cyril Bur
   - powerpc/32: Fix csum_partial_copy_generic() from Christophe Leroy
   - cxl: Set psl_fir_cntl to production environment value from Frederic Barrat
   - powerpc/eeh: Switch to conventional PCI address output in EEH log from Guilherme G. Piccoli
   - cxl: Use fixed width predefined types in data structure. from Philippe Bergheaud
   - powerpc/vdso: Add missing include file from Guenter Roeck
   - powerpc: Fix unused function warning 'lmb_to_memblock' from Alastair D'Silva
   - powerpc/powernv/ioda: Fix TCE invalidate to work in real mode again from Alexey Kardashevskiy
   - powerpc/cell: Add missing error code in spufs_mkgang() from Dan Carpenter
   - crypto: crc32c-vpmsum - Convert to CPU feature based module autoloading from Anton Blanchard
   - powerpc/pasemi: Fix coherent_dma_mask for dma engine from Darren Stevens

  Benjamin Herrenschmidt:
   - powerpc/32: Fix crash during static key init
   - powerpc: Update obsolete comment in setup_32.c about early_init()
   - powerpc: Print the kernel load address at the end of prom_init()
   - powerpc/pnv/pci: Fix incorrect PE reservation attempt on some 64-bit BARs
   - powerpc/xics: Properly set Edge/Level type and enable resend

  Mahesh Salgaonkar:
   - powerpc/book3s: Fix MCE console messages for unrecoverable MCE.
   - powerpc/powernv: Fix MCE handler to avoid trashing CR0/CR1 registers.
   - powerpc/powernv: Move IDLE_STATE_ENTER_SEQ macro to cpuidle.h
   - powerpc/powernv: Load correct TOC pointer while waking up from winkle.

  Andrew Donnellan:
   - cxl: Fix sparse warnings
   - cxl: Fix NULL dereference in cxl_context_init() on PowerVM guests

  Michael Ellerman:
   - selftests/powerpc: Specify we expect to build with std=gnu99
   - powerpc/Makefile: Use cflags-y/aflags-y for setting endian options
   - powerpc/pci: Fix endian bug in fixed PHB numbering"

* tag 'powerpc-4.8-3' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux: (26 commits)
  selftests/powerpc: Specify we expect to build with std=gnu99
  powerpc/vdso: Fix build rules to rebuild vdsos correctly
  powerpc/Makefile: Use cflags-y/aflags-y for setting endian options
  powerpc/32: Fix crash during static key init
  powerpc: Update obsolete comment in setup_32.c about early_init()
  powerpc: Print the kernel load address at the end of prom_init()
  powerpc/ptrace: Fix coredump since ptrace TM changes
  powerpc/32: Fix csum_partial_copy_generic()
  cxl: Set psl_fir_cntl to production environment value
  powerpc/pnv/pci: Fix incorrect PE reservation attempt on some 64-bit BARs
  powerpc/book3s: Fix MCE console messages for unrecoverable MCE.
  powerpc/pci: Fix endian bug in fixed PHB numbering
  powerpc/eeh: Switch to conventional PCI address output in EEH log
  cxl: Fix sparse warnings
  cxl: Fix NULL dereference in cxl_context_init() on PowerVM guests
  cxl: Use fixed width predefined types in data structure.
  powerpc/vdso: Add missing include file
  powerpc: Fix unused function warning 'lmb_to_memblock'
  powerpc/powernv: Fix MCE handler to avoid trashing CR0/CR1 registers.
  powerpc/powernv: Move IDLE_STATE_ENTER_SEQ macro to cpuidle.h
  ...
parents 4b9eaf33 ca49e64f
...
@@ -66,29 +66,28 @@ endif
 UTS_MACHINE := $(OLDARCH)
 
 ifeq ($(CONFIG_CPU_LITTLE_ENDIAN),y)
-override CC	+= -mlittle-endian
-ifneq ($(cc-name),clang)
-override CC	+= -mno-strict-align
-endif
-override AS	+= -mlittle-endian
 override LD	+= -EL
-override CROSS32CC += -mlittle-endian
 override CROSS32AS += -mlittle-endian
 LDEMULATION	:= lppc
 GNUTARGET	:= powerpcle
 MULTIPLEWORD	:= -mno-multiple
 KBUILD_CFLAGS_MODULE += $(call cc-option,-mno-save-toc-indirect)
 else
-ifeq ($(call cc-option-yn,-mbig-endian),y)
-override CC	+= -mbig-endian
-override AS	+= -mbig-endian
-endif
 override LD	+= -EB
 LDEMULATION	:= ppc
 GNUTARGET	:= powerpc
 MULTIPLEWORD	:= -mmultiple
 endif
 
+cflags-$(CONFIG_CPU_BIG_ENDIAN)		+= $(call cc-option,-mbig-endian)
+cflags-$(CONFIG_CPU_LITTLE_ENDIAN)	+= -mlittle-endian
+ifneq ($(cc-name),clang)
+cflags-$(CONFIG_CPU_LITTLE_ENDIAN)	+= -mno-strict-align
+endif
+
+aflags-$(CONFIG_CPU_BIG_ENDIAN)		+= $(call cc-option,-mbig-endian)
+aflags-$(CONFIG_CPU_LITTLE_ENDIAN)	+= -mlittle-endian
+
 ifeq ($(HAS_BIARCH),y)
 override AS	+= -a$(CONFIG_WORD_SIZE)
 override LD	+= -m elf$(CONFIG_WORD_SIZE)$(LDEMULATION)
...
@@ -232,6 +231,9 @@ cpu-as-$(CONFIG_E200)		+= -Wa,-me200
 KBUILD_AFLAGS += $(cpu-as-y)
 KBUILD_CFLAGS += $(cpu-as-y)
 
+KBUILD_AFLAGS += $(aflags-y)
+KBUILD_CFLAGS += $(cflags-y)
+
 head-y				:= arch/powerpc/kernel/head_$(CONFIG_WORD_SIZE).o
 head-$(CONFIG_8xx)		:= arch/powerpc/kernel/head_8xx.o
 head-$(CONFIG_40x)		:= arch/powerpc/kernel/head_40x.o
...
@@ -4,6 +4,7 @@
 #include <linux/module.h>
 #include <linux/string.h>
 #include <linux/kernel.h>
+#include <linux/cpufeature.h>
 #include <asm/switch_to.h>
 
 #define CHKSUM_BLOCK_SIZE	1
...
@@ -157,7 +158,7 @@ static void __exit crc32c_vpmsum_mod_fini(void)
 	crypto_unregister_shash(&alg);
 }
 
-module_init(crc32c_vpmsum_mod_init);
+module_cpu_feature_match(PPC_MODULE_FEATURE_VEC_CRYPTO, crc32c_vpmsum_mod_init);
 module_exit(crc32c_vpmsum_mod_fini);
 
 MODULE_AUTHOR("Anton Blanchard <anton@samba.org>");
...
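The switch from module_init() to module_cpu_feature_match() above is what makes autoloading work: the macro both registers the init function and emits a "cpu:..." module alias that udev matches against the features the CPU advertises. A minimal sketch of the pattern, using a hypothetical demo module (only the header and the macro are taken from the diff; everything else here is illustrative):

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/cpufeature.h>	/* module_cpu_feature_match() */

static int __init demo_init(void)
{
	/* Only reached on CPUs advertising the vector crypto facility. */
	pr_info("demo: VEC_CRYPTO present, loading\n");
	return 0;
}

static void __exit demo_exit(void)
{
}

/* Replaces module_init(); also emits the autoload alias. */
module_cpu_feature_match(PPC_MODULE_FEATURE_VEC_CRYPTO, demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");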
...
@@ -19,4 +19,17 @@ extern u64 pnv_first_deep_stop_state;
 #endif
 
+/* Idle state entry routines */
+#ifdef	CONFIG_PPC_P7_NAP
+#define	IDLE_STATE_ENTER_SEQ(IDLE_INST)				\
+	/* Magic NAP/SLEEP/WINKLE mode enter sequence */	\
+	std	r0,0(r1);					\
+	ptesync;						\
+	ld	r0,0(r1);					\
+1:	cmp	cr0,r0,r0;					\
+	bne	1b;						\
+	IDLE_INST;						\
+	b	.
+#endif /* CONFIG_PPC_P7_NAP */
+
 #endif
...
@@ -186,6 +186,7 @@ label##3:
 
 #ifndef __ASSEMBLY__
 void apply_feature_fixups(void);
+void setup_feature_keys(void);
 #endif
 
 #endif /* __ASM_POWERPC_FEATURE_FIXUPS_H */
...
@@ -75,14 +75,6 @@ static inline void disable_kernel_spe(void)
 static inline void __giveup_spe(struct task_struct *t) { }
 #endif
 
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-extern void flush_tmregs_to_thread(struct task_struct *);
-#else
-static inline void flush_tmregs_to_thread(struct task_struct *t)
-{
-}
-#endif
-
 static inline void clear_task_ebb(struct task_struct *t)
 {
 #ifdef CONFIG_PPC_BOOK3S_64
...
@@ -159,6 +159,8 @@ extern void xics_teardown_cpu(void);
 extern void xics_kexec_teardown_cpu(int secondary);
 extern void xics_migrate_irqs_away(void);
 extern void icp_native_eoi(struct irq_data *d);
+extern int xics_set_irq_type(struct irq_data *d, unsigned int flow_type);
+extern int xics_retrigger(struct irq_data *data);
 #ifdef CONFIG_SMP
 extern int xics_get_irq_server(unsigned int virq, const struct cpumask *cpumask,
 			       unsigned int strict_check);
...
@@ -168,10 +168,10 @@ static size_t eeh_dump_dev_log(struct eeh_dev *edev, char *buf, size_t len)
 	int n = 0, l = 0;
 	char buffer[128];
 
-	n += scnprintf(buf+n, len-n, "%04x:%02x:%02x:%01x\n",
+	n += scnprintf(buf+n, len-n, "%04x:%02x:%02x.%01x\n",
		       edev->phb->global_number, pdn->busno,
		       PCI_SLOT(pdn->devfn), PCI_FUNC(pdn->devfn));
-	pr_warn("EEH: of node=%04x:%02x:%02x:%01x\n",
+	pr_warn("EEH: of node=%04x:%02x:%02x.%01x\n",
		edev->phb->global_number, pdn->busno,
		PCI_SLOT(pdn->devfn), PCI_FUNC(pdn->devfn));
...
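The EEH change is cosmetic but worth spelling out: PCI addresses are conventionally written domain:bus:slot.function, with a dot (not a colon) before the function number. A small user-space sketch of the formatting, with PCI_SLOT/PCI_FUNC re-implemented here for illustration (same definitions as <linux/pci.h>):

#include <stdio.h>

#define PCI_SLOT(devfn)	(((devfn) >> 3) & 0x1f)
#define PCI_FUNC(devfn)	((devfn) & 0x07)

int main(void)
{
	unsigned int domain = 0x0001, bus = 0x0a, devfn = 0x48; /* slot 9, fn 0 */

	/* The old format string ended ":%01x" -> "0001:0a:09:0";
	 * the conventional form prints "0001:0a:09.0": */
	printf("%04x:%02x:%02x.%01x\n",
	       domain, bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
	return 0;
}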
...
@@ -144,29 +144,14 @@ machine_check_pSeries_1:
	 * vector
	 */
	SET_SCRATCH0(r13)		/* save r13 */
-#ifdef CONFIG_PPC_P7_NAP
-BEGIN_FTR_SECTION
-	/* Running native on arch 2.06 or later, check if we are
-	 * waking up from nap. We only handle no state loss and
-	 * supervisor state loss. We do -not- handle hypervisor
-	 * state loss at this time.
+	/*
+	 * Running native on arch 2.06 or later, we may wakeup from winkle
+	 * inside machine check. If yes, then last bit of HSPGR0 would be set
+	 * to 1. Hence clear it unconditionally.
	 */
-	mfspr	r13,SPRN_SRR1
-	rlwinm.	r13,r13,47-31,30,31
-	OPT_GET_SPR(r13, SPRN_CFAR, CPU_FTR_CFAR)
-	beq	9f
-
-	mfspr	r13,SPRN_SRR1
-	rlwinm.	r13,r13,47-31,30,31
-	/* waking up from powersave (nap) state */
-	cmpwi	cr1,r13,2
-	/* Total loss of HV state is fatal. let's just stay stuck here */
-	OPT_GET_SPR(r13, SPRN_CFAR, CPU_FTR_CFAR)
-	bgt	cr1,.
-9:
-	OPT_SET_SPR(r13, SPRN_CFAR, CPU_FTR_CFAR)
-END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
-#endif /* CONFIG_PPC_P7_NAP */
+	GET_PACA(r13)
+	clrrdi	r13,r13,1
+	SET_PACA(r13)
	EXCEPTION_PROLOG_0(PACA_EXMC)
 BEGIN_FTR_SECTION
	b	machine_check_powernv_early
...
@@ -1273,25 +1258,51 @@ machine_check_handle_early:
	 * Check if thread was in power saving mode. We come here when any
	 * of the following is true:
	 * a. thread wasn't in power saving mode
-	 * b. thread was in power saving mode with no state loss or
-	 *    supervisor state loss
+	 * b. thread was in power saving mode with no state loss,
+	 *    supervisor state loss or hypervisor state loss.
	 *
-	 * Go back to nap again if (b) is true.
+	 * Go back to nap/sleep/winkle mode again if (b) is true.
	 */
	rlwinm.	r11,r12,47-31,30,31	/* Was it in power saving mode? */
	beq	4f			/* No, it wasn;t */
	/* Thread was in power saving mode. Go back to nap again. */
	cmpwi	r11,2
-	bne	3f
-	/* Supervisor state loss */
+	blt	3f
+	/* Supervisor/Hypervisor state loss */
	li	r0,1
	stb	r0,PACA_NAPSTATELOST(r13)
 3:	bl	machine_check_queue_event
	MACHINE_CHECK_HANDLER_WINDUP
	GET_PACA(r13)
	ld	r1,PACAR1(r13)
-	li	r3,PNV_THREAD_NAP
-	b	pnv_enter_arch207_idle_mode
+	/*
+	 * Check what idle state this CPU was in and go back to same mode
+	 * again.
+	 */
+	lbz	r3,PACA_THREAD_IDLE_STATE(r13)
+	cmpwi	r3,PNV_THREAD_NAP
+	bgt	10f
+	IDLE_STATE_ENTER_SEQ(PPC_NAP)
+	/* No return */
+10:
+	cmpwi	r3,PNV_THREAD_SLEEP
+	bgt	2f
+	IDLE_STATE_ENTER_SEQ(PPC_SLEEP)
+	/* No return */
+2:
+	/*
+	 * Go back to winkle. Please note that this thread was woken up in
+	 * machine check from winkle and have not restored the per-subcore
+	 * state. Hence before going back to winkle, set last bit of HSPGR0
+	 * to 1. This will make sure that if this thread gets woken up
+	 * again at reset vector 0x100 then it will get chance to restore
+	 * the subcore state.
+	 */
+	ori	r13,r13,1
+	SET_PACA(r13)
+	IDLE_STATE_ENTER_SEQ(PPC_WINKLE)
+	/* No return */
 4:
 #endif
	/*
...
@@ -44,18 +44,6 @@
				PSSCR_PSLL_MASK | PSSCR_TR_MASK | \
				PSSCR_MTL_MASK
 
-/* Idle state entry routines */
-
-#define	IDLE_STATE_ENTER_SEQ(IDLE_INST)				\
-	/* Magic NAP/SLEEP/WINKLE mode enter sequence */	\
-	std	r0,0(r1);					\
-	ptesync;						\
-	ld	r0,0(r1);					\
-1:	cmp	cr0,r0,r0;					\
-	bne	1b;						\
-	IDLE_INST;						\
-	b	.
-
	.text
 
 /*
...
@@ -363,8 +351,8 @@ _GLOBAL(power9_idle_stop)
 * cr3 - set to gt if waking up with partial/complete hypervisor state loss
 */
 _GLOBAL(pnv_restore_hyp_resource)
-	ld	r2,PACATOC(r13);
 BEGIN_FTR_SECTION
+	ld	r2,PACATOC(r13);
	/*
	 * POWER ISA 3. Use PSSCR to determine if we
	 * are waking up from deep idle state
...
@@ -395,6 +383,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	 */
	clrldi	r5,r13,63
	clrrdi	r13,r13,1
+
+	/* Now that we are sure r13 is corrected, load TOC */
+	ld	r2,PACATOC(r13);
	cmpwi	cr4,r5,1
	mtspr	SPRN_HSPRG0,r13
...
@@ -92,7 +92,8 @@ void save_mce_event(struct pt_regs *regs, long handled,
	mce->in_use = 1;
 
	mce->initiator = MCE_INITIATOR_CPU;
-	if (handled)
+	/* Mark it recovered if we have handled it and MSR(RI=1). */
+	if (handled && (regs->msr & MSR_RI))
		mce->disposition = MCE_DISPOSITION_RECOVERED;
	else
		mce->disposition = MCE_DISPOSITION_NOT_RECOVERED;
...
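A sketch of the recoverability rule this MCE patch (together with the opal.c message below) implements: an event is only marked recovered when the handler dealt with it and MSR[RI] was set, because with RI=0 the interrupted context's SRR0/SRR1 may already be clobbered and returning is unsafe. User-space model with simplified constants:

#include <stdio.h>

#define MSR_RI	0x2ULL	/* Recoverable Interrupt, simplified mask */

enum disposition { MCE_RECOVERED, MCE_NOT_RECOVERED };

static enum disposition classify(long handled, unsigned long long msr)
{
	/* The fix: "handled" alone is no longer enough, RI must be set. */
	return (handled && (msr & MSR_RI)) ? MCE_RECOVERED : MCE_NOT_RECOVERED;
}

int main(void)
{
	printf("handled, RI=1 -> %d\n", classify(1, MSR_RI));
	printf("handled, RI=0 -> %d\n", classify(1, 0)); /* now NOT recovered */
	return 0;
}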
...
@@ -78,6 +78,7 @@ EXPORT_SYMBOL(get_pci_dma_ops);
 static int get_phb_number(struct device_node *dn)
 {
	int ret, phb_id = -1;
+	u32 prop_32;
	u64 prop;
 
	/*
...
@@ -86,8 +87,10 @@ static int get_phb_number(struct device_node *dn)
	 * reading "ibm,opal-phbid", only present in OPAL environment.
	 */
	ret = of_property_read_u64(dn, "ibm,opal-phbid", &prop);
-	if (ret)
-		ret = of_property_read_u32_index(dn, "reg", 1, (u32 *)&prop);
+	if (ret) {
+		ret = of_property_read_u32_index(dn, "reg", 1, &prop_32);
+		prop = prop_32;
+	}
 
	if (!ret)
		phb_id = (int)(prop & (MAX_PHBS - 1));
...
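The endian bug fixed here is the classic type-punning mistake: storing a 32-bit property through a (u32 *) cast into a u64 fills the low-order half only on little-endian; on big-endian the value lands in the high-order bytes, so the computed PHB id was shifted by 32 bits. A user-space sketch of the effect (memcpy keeps the aliasing well-defined; the hex values are hypothetical):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint64_t prop = 0;
	uint32_t reg = 0x1234;

	/* Old, broken pattern: a 4-byte store into an 8-byte object.
	 * On a big-endian CPU these become the HIGH bytes of prop. */
	memcpy(&prop, &reg, sizeof(reg));
	printf("aliased store: 0x%016llx\n", (unsigned long long)prop);

	/* Fixed pattern: read into a u32, then assign. Endian-safe. */
	prop = reg;
	printf("plain assign:  0x%016llx\n", (unsigned long long)prop);
	return 0;
}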
...
@@ -1074,26 +1074,6 @@ static inline void restore_sprs(struct thread_struct *old_thread,
 #endif
 }
 
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-void flush_tmregs_to_thread(struct task_struct *tsk)
-{
-	/*
-	 * Process self tracing is not yet supported through
-	 * ptrace interface. Ptrace generic code should have
-	 * prevented this from happening in the first place.
-	 * Warn once here with the message, if some how it
-	 * is attempted.
-	 */
-	WARN_ONCE(tsk == current,
-		"Not expecting ptrace on self: TM regs may be incorrect\n");
-
-	/*
-	 * If task is not current, it should have been flushed
-	 * already to it's thread_struct during __switch_to().
-	 */
-}
-#endif
-
 struct task_struct *__switch_to(struct task_struct *prev,
	struct task_struct *new)
 {
...
@@ -2940,7 +2940,7 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
 
	/* Don't print anything after quiesce under OPAL, it crashes OFW */
	if (of_platform != PLATFORM_OPAL) {
-		prom_printf("Booting Linux via __start() ...\n");
+		prom_printf("Booting Linux via __start() @ 0x%lx ...\n", kbase);
		prom_debug("->dt_header_start=0x%x\n", hdr);
	}
...
@@ -38,6 +38,7 @@
 #include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/switch_to.h>
+#include <asm/tm.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/syscalls.h>
...
@@ -118,6 +119,24 @@ static const struct pt_regs_offset regoffset_table[] = {
	REG_OFFSET_END,
 };
 
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+static void flush_tmregs_to_thread(struct task_struct *tsk)
+{
+	/*
+	 * If task is not current, it will have been flushed already to
+	 * it's thread_struct during __switch_to().
+	 *
+	 * A reclaim flushes ALL the state.
+	 */
+	if (tsk == current && MSR_TM_SUSPENDED(mfmsr()))
+		tm_reclaim_current(TM_CAUSE_SIGNAL);
+}
+#else
+static inline void flush_tmregs_to_thread(struct task_struct *tsk) { }
+#endif
+
 /**
  * regs_query_register_offset() - query register offset from its name
  * @name:	the name of a register
...
@@ -93,15 +93,16 @@ notrace unsigned long __init early_init(unsigned long dt_ptr)
  * and we are running with enough of the MMU enabled to have our
  * proper kernel virtual addresses
  *
- * Find out what kind of machine we're on and save any data we need
- * from the early boot process (devtree is copied on pmac by prom_init()).
- * This is called very early on the boot process, after a minimal
- * MMU environment has been set up but before MMU_init is called.
+ * We do the initial parsing of the flat device-tree and prepares
+ * for the MMU to be fully initialized.
  */
 extern unsigned int memset_nocache_branch; /* Insn to be replaced by NOP */
 
 notrace void __init machine_init(u64 dt_ptr)
 {
+	/* Configure static keys first, now that we're relocated. */
+	setup_feature_keys();
+
	/* Enable early debugging if any specified (see udbg.h) */
	udbg_early_init();
...
@@ -300,6 +300,7 @@ void __init early_setup(unsigned long dt_ptr)
 
	/* Apply all the dynamic patching */
	apply_feature_fixups();
+	setup_feature_keys();
 
	/* Initialize the hash table or TLB handling */
	early_init_mmu();
...
@@ -22,6 +22,7 @@
 #include <linux/security.h>
 #include <linux/memblock.h>
 
+#include <asm/cpu_has_feature.h>
 #include <asm/pgtable.h>
 #include <asm/processor.h>
 #include <asm/mmu.h>
...
@@ -30,7 +30,7 @@ CPPFLAGS_vdso32.lds += -P -C -Upowerpc
 $(obj)/vdso32_wrapper.o : $(obj)/vdso32.so
 
 # link rule for the .so file, .lds has to be first
-$(obj)/vdso32.so.dbg: $(src)/vdso32.lds $(obj-vdso32)
+$(obj)/vdso32.so.dbg: $(src)/vdso32.lds $(obj-vdso32) FORCE
	$(call if_changed,vdso32ld)
 
 # strip rule for the .so file
...
@@ -39,12 +39,12 @@ $(obj)/%.so: $(obj)/%.so.dbg FORCE
	$(call if_changed,objcopy)
 
 # assembly rules for the .S files
-$(obj-vdso32): %.o: %.S
+$(obj-vdso32): %.o: %.S FORCE
	$(call if_changed_dep,vdso32as)
 
 # actual build commands
 quiet_cmd_vdso32ld = VDSO32L $@
-      cmd_vdso32ld = $(CROSS32CC) $(c_flags) -Wl,-T $^ -o $@
+      cmd_vdso32ld = $(CROSS32CC) $(c_flags) -o $@ -Wl,-T$(filter %.lds,$^) $(filter %.o,$^)
 quiet_cmd_vdso32as = VDSO32A $@
       cmd_vdso32as = $(CROSS32CC) $(a_flags) -c -o $@ $<
...
@@ -23,7 +23,7 @@ CPPFLAGS_vdso64.lds += -P -C -U$(ARCH)
 $(obj)/vdso64_wrapper.o : $(obj)/vdso64.so
 
 # link rule for the .so file, .lds has to be first
-$(obj)/vdso64.so.dbg: $(src)/vdso64.lds $(obj-vdso64)
+$(obj)/vdso64.so.dbg: $(src)/vdso64.lds $(obj-vdso64) FORCE
	$(call if_changed,vdso64ld)
 
 # strip rule for the .so file
...
@@ -32,12 +32,12 @@ $(obj)/%.so: $(obj)/%.so.dbg FORCE
	$(call if_changed,objcopy)
 
 # assembly rules for the .S files
-$(obj-vdso64): %.o: %.S
+$(obj-vdso64): %.o: %.S FORCE
	$(call if_changed_dep,vdso64as)
 
 # actual build commands
 quiet_cmd_vdso64ld = VDSO64L $@
-      cmd_vdso64ld = $(CC) $(c_flags) -Wl,-T $^ -o $@
+      cmd_vdso64ld = $(CC) $(c_flags) -o $@ -Wl,-T$(filter %.lds,$^) $(filter %.o,$^)
 quiet_cmd_vdso64as = VDSO64A $@
       cmd_vdso64as = $(CC) $(a_flags) -c -o $@ $<
...
@@ -127,8 +127,9 @@ _GLOBAL(csum_partial_copy_generic)
	stw	r7,12(r1)
	stw	r8,8(r1)
 
-	andi.	r0,r4,1			/* is destination address even ? */
-	cmplwi	cr7,r0,0
+	rlwinm	r0,r4,3,0x8
+	rlwnm	r6,r6,r0,0,31	/* odd destination address: rotate one byte */
+	cmplwi	cr7,r0,0	/* is destination address even ? */
	addic	r12,r6,0
	addi	r6,r4,-4
	neg	r0,r4
...
@@ -237,7 +238,7 @@ _GLOBAL(csum_partial_copy_generic)
 66:	addze	r3,r12
	addi	r1,r1,16
	beqlr+	cr7
-	rlwinm	r3,r3,8,0,31	/* swap bytes for odd destination */
+	rlwinm	r3,r3,8,0,31	/* odd destination address: rotate one byte */
	blr
 
 /* read fault */
...
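The csum_partial_copy_generic() fix hinges on a rotate trick: rlwinm r0,r4,3,0x8 turns the low bit of the destination address into a rotate count of 0 or 8, and rlwnm then byte-rotates the incoming checksum so an odd-aligned destination folds correctly (the final rlwinm rotates it back). A C sketch of the arithmetic with hypothetical values, not the kernel routine:

#include <stdint.h>
#include <stdio.h>

static uint32_t rotl32(uint32_t x, unsigned n)
{
	return n ? (x << n) | (x >> (32 - n)) : x;
}

int main(void)
{
	uintptr_t dst = 0x1001;		/* odd destination address */
	unsigned rot = (dst & 1) << 3;	/* what rlwinm r0,r4,3,0x8 computes */
	uint32_t sum = 0x11223344;	/* incoming partial checksum */

	printf("rot=%u  rotated sum=0x%08x\n", rot, rotl32(sum, rot));
	return 0;
}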
@@ -188,7 +188,10 @@ void __init apply_feature_fixups(void)
			  &__start___fw_ftr_fixup, &__stop___fw_ftr_fixup);
 #endif
 
	do_final_fixups();
+}
 
+void __init setup_feature_keys(void)
+{
	/*
	 * Initialise jump label. This causes all the cpu/mmu_has_feature()
	 * checks to take on their correct polarity based on the current set of
...
@@ -496,8 +496,10 @@ spufs_mkgang(struct inode *dir, struct dentry *dentry, umode_t mode)
	gang = alloc_spu_gang();
	SPUFS_I(inode)->i_ctx = NULL;
	SPUFS_I(inode)->i_gang = gang;
-	if (!gang)
+	if (!gang) {
+		ret = -ENOMEM;
		goto out_iput;
+	}
 
	inode->i_op = &simple_dir_inode_operations;
	inode->i_fop = &simple_dir_operations;
...
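The spufs_mkgang() fix restores a standard kernel error-path invariant: set the error code before jumping to the cleanup label, otherwise the caller sees the stale (usually zero) value and treats a failed allocation as success. A standalone sketch with stand-in helpers (alloc_gang/cleanup are hypothetical):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct gang { int id; };

static struct gang *alloc_gang(void) { return NULL; /* simulate failure */ }
static void cleanup(void) { }

static int mkgang_sketch(void)
{
	int ret = 0;
	struct gang *gang = alloc_gang();

	if (!gang) {
		ret = -ENOMEM;	/* the line the fix adds */
		goto out_cleanup;
	}
	free(gang);
	return 0;

out_cleanup:
	cleanup();
	return ret;	/* without the assignment this would return 0 */
}

int main(void)
{
	printf("mkgang_sketch() = %d\n", mkgang_sketch());
	return 0;
}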
...
@@ -187,6 +187,11 @@ static void pci_dma_dev_setup_pasemi(struct pci_dev *dev)
	if (dev->vendor == 0x1959 && dev->device == 0xa007 &&
	    !firmware_has_feature(FW_FEATURE_LPAR)) {
		dev->dev.archdata.dma_ops = &dma_direct_ops;
+		/*
+		 * Set the coherent DMA mask to prevent the iommu
+		 * being used unnecessarily
+		 */
+		dev->dev.coherent_dma_mask = DMA_BIT_MASK(44);
		return;
	}
 #endif
...
@@ -228,7 +228,8 @@ int __init opal_event_init(void)
	}
 
	/* Install interrupt handler */
-	rc = request_irq(virq, opal_interrupt, 0, "opal", NULL);
+	rc = request_irq(virq, opal_interrupt, IRQF_TRIGGER_LOW,
+			 "opal", NULL);
	if (rc) {
		irq_dispose_mapping(virq);
		pr_warn("Error %d requesting irq %d (0x%x)\n",
...
@@ -399,6 +399,7 @@ static int opal_recover_mce(struct pt_regs *regs,
 
	if (!(regs->msr & MSR_RI)) {
		/* If MSR_RI isn't set, we cannot recover */
+		pr_err("Machine check interrupt unrecoverable: MSR(RI=0)\n");
		recovered = 0;
	} else if (evt->disposition == MCE_DISPOSITION_RECOVERED) {
		/* Platform corrected itself */
...
@@ -111,10 +111,17 @@ static int __init iommu_setup(char *str)
 }
 early_param("iommu", iommu_setup);
 
-static inline bool pnv_pci_is_mem_pref_64(unsigned long flags)
+static inline bool pnv_pci_is_m64(struct pnv_phb *phb, struct resource *r)
 {
-	return ((flags & (IORESOURCE_MEM_64 | IORESOURCE_PREFETCH)) ==
-		(IORESOURCE_MEM_64 | IORESOURCE_PREFETCH));
+	/*
+	 * WARNING: We cannot rely on the resource flags. The Linux PCI
+	 * allocation code sometimes decides to put a 64-bit prefetchable
+	 * BAR in the 32-bit window, so we have to compare the addresses.
+	 *
+	 * For simplicity we only test resource start.
+	 */
+	return (r->start >= phb->ioda.m64_base &&
+		r->start < (phb->ioda.m64_base + phb->ioda.m64_size));
 }
 
 static struct pnv_ioda_pe *pnv_ioda_init_pe(struct pnv_phb *phb, int pe_no)
...
@@ -229,7 +236,7 @@ static void pnv_ioda_reserve_dev_m64_pe(struct pci_dev *pdev,
	sgsz = phb->ioda.m64_segsize;
	for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
		r = &pdev->resource[i];
-		if (!r->parent || !pnv_pci_is_mem_pref_64(r->flags))
+		if (!r->parent || !pnv_pci_is_m64(phb, r))
			continue;
 
		start = _ALIGN_DOWN(r->start - base, sgsz);
...
@@ -1877,7 +1884,7 @@ static void pnv_pci_phb3_tce_invalidate(struct pnv_ioda_pe *pe, bool rm,
					unsigned shift, unsigned long index,
					unsigned long npages)
 {
-	__be64 __iomem *invalidate = pnv_ioda_get_inval_reg(pe->phb, false);
+	__be64 __iomem *invalidate = pnv_ioda_get_inval_reg(pe->phb, rm);
	unsigned long start, end, inc;
 
	/* We'll invalidate DMA address in PE scope */
...
@@ -2863,7 +2870,7 @@ static void pnv_pci_ioda_fixup_iov_resources(struct pci_dev *pdev)
		res = &pdev->resource[i + PCI_IOV_RESOURCES];
		if (!res->flags || res->parent)
			continue;
-		if (!pnv_pci_is_mem_pref_64(res->flags)) {
+		if (!pnv_pci_is_m64(phb, res)) {
			dev_warn(&pdev->dev, "Don't support SR-IOV with"
					" non M64 VF BAR%d: %pR. \n",
				 i, res);
...
@@ -2958,7 +2965,7 @@ static void pnv_ioda_setup_pe_res(struct pnv_ioda_pe *pe,
			index++;
		}
	} else if ((res->flags & IORESOURCE_MEM) &&
-		   !pnv_pci_is_mem_pref_64(res->flags)) {
+		   !pnv_pci_is_m64(phb, res)) {
		region.start = res->start -
			       phb->hose->mem_offset[0] -
			       phb->ioda.m32_pci_base;
...
@@ -3083,9 +3090,12 @@ static resource_size_t pnv_pci_window_alignment(struct pci_bus *bus,
		bridge = bridge->bus->self;
	}
 
-	/* We fail back to M32 if M64 isn't supported */
-	if (phb->ioda.m64_segsize &&
-	    pnv_pci_is_mem_pref_64(type))
+	/*
+	 * We fall back to M32 if M64 isn't supported. We enforce the M64
+	 * alignment for any 64-bit resource, PCIe doesn't care and
+	 * bridges only do 64-bit prefetchable anyway.
+	 */
+	if (phb->ioda.m64_segsize && (type & IORESOURCE_MEM_64))
		return phb->ioda.m64_segsize;
	if (type & IORESOURCE_MEM)
		return phb->ioda.m32_segsize;
...
@@ -3125,7 +3135,7 @@ static void pnv_pci_fixup_bridge_resources(struct pci_bus *bus,
		w = NULL;
		if (r->flags & type & IORESOURCE_IO)
			w = &hose->io_resource;
-		else if (pnv_pci_is_mem_pref_64(r->flags) &&
+		else if (pnv_pci_is_m64(phb, r) &&
			 (type & IORESOURCE_PREFETCH) &&
			 phb->ioda.m64_segsize)
			w = &hose->mem_resources[1];
...
@@ -320,19 +320,6 @@ static int dlpar_remove_device_tree_lmb(struct of_drconf_cell *lmb)
	return dlpar_update_device_tree_lmb(lmb);
 }
 
-static struct memory_block *lmb_to_memblock(struct of_drconf_cell *lmb)
-{
-	unsigned long section_nr;
-	struct mem_section *mem_sect;
-	struct memory_block *mem_block;
-
-	section_nr = pfn_to_section_nr(PFN_DOWN(lmb->base_addr));
-	mem_sect = __nr_to_section(section_nr);
-
-	mem_block = find_memory_block(mem_sect);
-
-	return mem_block;
-}
-
 #ifdef CONFIG_MEMORY_HOTREMOVE
 static int pseries_remove_memblock(unsigned long base, unsigned int memblock_size)
 {
...
@@ -420,6 +407,19 @@ static bool lmb_is_removable(struct of_drconf_cell *lmb)
 
 static int dlpar_add_lmb(struct of_drconf_cell *);
 
+static struct memory_block *lmb_to_memblock(struct of_drconf_cell *lmb)
+{
+	unsigned long section_nr;
+	struct mem_section *mem_sect;
+	struct memory_block *mem_block;
+
+	section_nr = pfn_to_section_nr(PFN_DOWN(lmb->base_addr));
+	mem_sect = __nr_to_section(section_nr);
+
+	mem_block = find_memory_block(mem_sect);
+
+	return mem_block;
+}
+
 static int dlpar_remove_lmb(struct of_drconf_cell *lmb)
 {
	struct memory_block *mem_block;
...
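The lmb_to_memblock() move above fixes a -Wunused-function warning: when the config option guarding its only callers is off, the static definition has no user and gcc flags it. The general shape of the problem and fix as a standalone sketch (FEATURE_ON stands in for the config option; build with and without -DFEATURE_ON):

/* cc -Wall -Wunused-function sketch.c [-DFEATURE_ON] */

#ifdef FEATURE_ON
static int helper(void)		/* fine: has a caller in this config */
{
	return 42;
}

int feature_entry(void)
{
	return helper();
}
#endif /* FEATURE_ON */

int main(void)
{
	/* Had helper() been defined outside the #ifdef, building without
	 * -DFEATURE_ON would warn: 'helper' defined but not used. */
	return 0;
}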
...
 config PPC_XICS
	def_bool n
	select PPC_SMP_MUXED_IPI
+	select HARDIRQS_SW_RESEND
 
 config PPC_ICP_NATIVE
	def_bool n
...
@@ -156,7 +156,9 @@ static struct irq_chip ics_opal_irq_chip = {
	.irq_mask = ics_opal_mask_irq,
	.irq_unmask = ics_opal_unmask_irq,
	.irq_eoi = NULL, /* Patched at init time */
-	.irq_set_affinity = ics_opal_set_affinity
+	.irq_set_affinity = ics_opal_set_affinity,
+	.irq_set_type = xics_set_irq_type,
+	.irq_retrigger = xics_retrigger,
 };
 
 static int ics_opal_map(struct ics *ics, unsigned int virq);
...
@@ -163,7 +163,9 @@ static struct irq_chip ics_rtas_irq_chip = {
	.irq_mask = ics_rtas_mask_irq,
	.irq_unmask = ics_rtas_unmask_irq,
	.irq_eoi = NULL, /* Patched at init time */
-	.irq_set_affinity = ics_rtas_set_affinity
+	.irq_set_affinity = ics_rtas_set_affinity,
+	.irq_set_type = xics_set_irq_type,
+	.irq_retrigger = xics_retrigger,
 };
 
 static int ics_rtas_map(struct ics *ics, unsigned int virq)
...
@@ -328,8 +328,12 @@ static int xics_host_map(struct irq_domain *h, unsigned int virq,
 
	pr_devel("xics: map virq %d, hwirq 0x%lx\n", virq, hw);
 
-	/* They aren't all level sensitive but we just don't really know */
-	irq_set_status_flags(virq, IRQ_LEVEL);
+	/*
+	 * Mark interrupts as edge sensitive by default so that resend
+	 * actually works. The device-tree parsing will turn the LSIs
+	 * back to level.
+	 */
+	irq_clear_status_flags(virq, IRQ_LEVEL);
 
	/* Don't call into ICS for IPIs */
	if (hw == XICS_IPI) {
...
@@ -351,13 +355,54 @@ static int xics_host_xlate(struct irq_domain *h, struct device_node *ct,
			   irq_hw_number_t *out_hwirq, unsigned int *out_flags)
 
 {
-	/* Current xics implementation translates everything
-	 * to level. It is not technically right for MSIs but this
-	 * is irrelevant at this point. We might get smarter in the future
-	 */
	*out_hwirq = intspec[0];
-	*out_flags = IRQ_TYPE_LEVEL_LOW;
 
+	/*
+	 * If intsize is at least 2, we look for the type in the second cell,
+	 * we assume the LSB indicates a level interrupt.
+	 */
+	if (intsize > 1) {
+		if (intspec[1] & 1)
+			*out_flags = IRQ_TYPE_LEVEL_LOW;
+		else
+			*out_flags = IRQ_TYPE_EDGE_RISING;
+	} else
+		*out_flags = IRQ_TYPE_LEVEL_LOW;
+
+	return 0;
+}
+
+int xics_set_irq_type(struct irq_data *d, unsigned int flow_type)
+{
+	/*
+	 * We only support these. This has really no effect other than setting
+	 * the corresponding descriptor bits mind you but those will in turn
+	 * affect the resend function when re-enabling an edge interrupt.
+	 *
+	 * Set set the default to edge as explained in map().
+	 */
+	if (flow_type == IRQ_TYPE_DEFAULT || flow_type == IRQ_TYPE_NONE)
+		flow_type = IRQ_TYPE_EDGE_RISING;
+
+	if (flow_type != IRQ_TYPE_EDGE_RISING &&
+	    flow_type != IRQ_TYPE_LEVEL_LOW)
+		return -EINVAL;
+
+	irqd_set_trigger_type(d, flow_type);
+
+	return IRQ_SET_MASK_OK_NOCOPY;
+}
+
+int xics_retrigger(struct irq_data *data)
+{
+	/*
+	 * We need to push a dummy CPPR when retriggering, since the subsequent
+	 * EOI will try to pop it. Passing 0 works, as the function hard codes
+	 * the priority value anyway.
+	 */
+	xics_push_cppr(0);
+
+	/* Tell the core to do a soft retrigger */
	return 0;
 }
...
@@ -90,8 +90,7 @@ int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master,
	 */
	mutex_lock(&afu->contexts_lock);
	idr_preload(GFP_KERNEL);
-	i = idr_alloc(&ctx->afu->contexts_idr, ctx,
-		      ctx->afu->adapter->native->sl_ops->min_pe,
+	i = idr_alloc(&ctx->afu->contexts_idr, ctx, ctx->afu->adapter->min_pe,
		      ctx->afu->num_procs, GFP_NOWAIT);
	idr_preload_end();
	mutex_unlock(&afu->contexts_lock);
...
@@ -561,7 +561,6 @@ struct cxl_service_layer_ops {
	u64 (*timebase_read)(struct cxl *adapter);
	int capi_mode;
	bool needs_reset_before_disable;
-	int min_pe;
 };
 
 struct cxl_native {
...
@@ -603,6 +602,7 @@ struct cxl {
	struct bin_attribute cxl_attr;
	int adapter_num;
	int user_irqs;
+	int min_pe;
	u64 ps_size;
	u16 psl_rev;
	u16 base_image;
...
@@ -924,7 +924,7 @@ static irqreturn_t native_irq_multiplexed(int irq, void *data)
	return fail_psl_irq(afu, &irq_info);
 }
 
-void native_irq_wait(struct cxl_context *ctx)
+static void native_irq_wait(struct cxl_context *ctx)
 {
	u64 dsisr;
	int timeout = 1000;
...
@@ -379,7 +379,7 @@ static int calc_capp_routing(struct pci_dev *dev, u64 *chipid, u64 *capp_unit_id
 
 static int init_implementation_adapter_psl_regs(struct cxl *adapter, struct pci_dev *dev)
 {
-	u64 psl_dsnctl;
+	u64 psl_dsnctl, psl_fircntl;
	u64 chipid;
	u64 capp_unit_id;
	int rc;
...
@@ -398,8 +398,11 @@ static int init_implementation_adapter_psl_regs(struct cxl *adapter, struct pci_
	cxl_p1_write(adapter, CXL_PSL_RESLCKTO, 0x20000000200ULL);
	/* snoop write mask */
	cxl_p1_write(adapter, CXL_PSL_SNWRALLOC, 0x00000000FFFFFFFFULL);
-	/* set fir_accum */
-	cxl_p1_write(adapter, CXL_PSL_FIR_CNTL, 0x0800000000000000ULL);
+	/* set fir_cntl to recommended value for production env */
+	psl_fircntl = (0x2ULL << (63-3));	/* ce_report */
+	psl_fircntl |= (0x1ULL << (63-6));	/* FIR_report */
+	psl_fircntl |= 0x1ULL;			/* ce_thresh */
+	cxl_p1_write(adapter, CXL_PSL_FIR_CNTL, psl_fircntl);
	/* for debugging with trace arrays */
	cxl_p1_write(adapter, CXL_PSL_TRACE, 0x0000FF7C00000000ULL);
...
@@ -1521,14 +1524,15 @@ static const struct cxl_service_layer_ops xsl_ops = {
	.write_timebase_ctrl = write_timebase_ctrl_xsl,
	.timebase_read = timebase_read_xsl,
	.capi_mode = OPAL_PHB_CAPI_MODE_DMA,
-	.min_pe = 1, /* Workaround for Mellanox CX4 HW bug */
 };
 
 static void set_sl_ops(struct cxl *adapter, struct pci_dev *dev)
 {
	if (dev->vendor == PCI_VENDOR_ID_MELLANOX && dev->device == 0x1013) {
+		/* Mellanox CX-4 */
		dev_info(&adapter->dev, "Device uses an XSL\n");
		adapter->native->sl_ops = &xsl_ops;
+		adapter->min_pe = 1; /* Workaround for CX-4 hardware bug */
	} else {
		dev_info(&adapter->dev, "Device uses a PSL\n");
		adapter->native->sl_ops = &psl_ops;
...
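The psl_fircntl construction above uses IBM big-endian bit numbering, in which bit 0 is the most significant bit of the 64-bit register; a field whose rightmost bit is IBM bit n is therefore placed with a shift of (63 - n). A sketch reproducing the register value from the diff (field names taken from the diff's comments; their exact bit ranges are an assumption):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t psl_fircntl;

	psl_fircntl  = (0x2ULL << (63 - 3));	/* ce_report, ends at IBM bit 3 */
	psl_fircntl |= (0x1ULL << (63 - 6));	/* FIR_report, IBM bit 6 */
	psl_fircntl |= 0x1ULL;			/* ce_thresh, IBM bit 63 (LSB) */

	/* Prints 0x2200000000000001 */
	printf("CXL_PSL_FIR_CNTL = 0x%016llx\n",
	       (unsigned long long)psl_fircntl);
	return 0;
}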
...
@@ -221,7 +221,7 @@ int cxl_pci_vphb_add(struct cxl_afu *afu)
	/* Setup the PHB using arch provided callback */
	phb->ops = &cxl_pcie_pci_ops;
	phb->cfg_addr = NULL;
-	phb->cfg_data = 0;
+	phb->cfg_data = NULL;
	phb->private_data = afu;
	phb->controller_ops = cxl_pci_controller_ops;
...
@@ -136,8 +136,8 @@ struct cxl_event_afu_driver_reserved {
	 *
	 * Of course the contents will be ABI, but that's up the AFU driver.
	 */
-	size_t data_size;
-	u8 data[];
+	__u32 data_size;
+	__u8 data[];
 };
 
 struct cxl_event {
...
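Why the uapi header swaps size_t and u8 for __u32 and __u8: types whose width depends on the ABI change a shared structure's layout between 32-bit and 64-bit user space, and the bare kernel u8 typedef is not even visible outside the kernel. A sketch of the layout difference (sizes printed depend on the ABI you compile for):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct event_bad  { size_t data_size; };	/* 4 or 8 bytes, ABI-dependent */
struct event_good { uint32_t data_size; };	/* 4 bytes everywhere */

int main(void)
{
	printf("size_t-based struct: %zu bytes\n", sizeof(struct event_bad));
	printf("__u32-based struct:  %zu bytes\n", sizeof(struct event_good));
	return 0;
}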
...
@@ -8,7 +8,7 @@ ifeq ($(ARCH),powerpc)
 
 GIT_VERSION = $(shell git describe --always --long --dirty || echo "unknown")
 
-CFLAGS := -Wall -O2 -Wall -Werror -DGIT_VERSION='"$(GIT_VERSION)"' -I$(CURDIR) $(CFLAGS)
+CFLAGS := -std=gnu99 -Wall -O2 -Wall -Werror -DGIT_VERSION='"$(GIT_VERSION)"' -I$(CURDIR) $(CFLAGS)
 
 export CFLAGS
...