Commit d08de37b authored by Linus Torvalds

Merge tag 'powerpc-4.17-3' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux

Pull powerpc fixes from Michael Ellerman:

 - Fix an off-by-one bug in our alternative asm patching which leads to
   incorrectly patched code. This bug lay dormant for nearly 10 years
   but we finally hit it due to a recent change.

 - Fix lockups when running KVM guests on Power8 due to a missing check
   when a thread that's running KVM comes out of idle.

 - Fix an out-of-spec behaviour in the XIVE code (P9 interrupt
   controller).

 - Fix EEH handling of bridge MMIO windows.

 - Prevent crashes in our RFI fallback flush handler if firmware didn't
   tell us the size of the L1 cache (only seen on simulators).

Thanks to: Benjamin Herrenschmidt, Madhavan Srinivasan, Michael Neuling.

* tag 'powerpc-4.17-3' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux:
  powerpc/kvm: Fix lockups when running KVM guests on Power8
  powerpc/eeh: Fix enabling bridge MMIO windows
  powerpc/xive: Fix trying to "push" an already active pool VP
  powerpc/64s: Default l1d_size to 64K in RFI fallback flush
  powerpc/lib: Fix off-by-one in alternate feature patching
parents c2d94c52 56376c58
@@ -807,7 +807,8 @@ static void eeh_restore_bridge_bars(struct eeh_dev *edev)
 	eeh_ops->write_config(pdn, 15*4, 4, edev->config_space[15]);
 
 	/* PCI Command: 0x4 */
-	eeh_ops->write_config(pdn, PCI_COMMAND, 4, edev->config_space[1]);
+	eeh_ops->write_config(pdn, PCI_COMMAND, 4, edev->config_space[1] |
+			      PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
 
 	/* Check the PCIe link is ready */
 	eeh_bridge_check_link(edev);
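Background for this one-liner: the bridge's config space snapshot is taken early at boot, before MMIO is enabled on the bridge, so restoring the saved PCI_COMMAND verbatim after a reset leaves the memory decode and bus-master bits clear and the MMIO windows dead. A minimal userspace-style sketch of the masking (the saved value is made up; the bit values match include/uapi/linux/pci_regs.h):

#include <stdint.h>
#include <stdio.h>

/* Bit values match include/uapi/linux/pci_regs.h */
#define PCI_COMMAND_MEMORY	0x2	/* enable response in memory space */
#define PCI_COMMAND_MASTER	0x4	/* enable bus mastering */

int main(void)
{
	/* Hypothetical PCI_COMMAND snapshot taken before MMIO was
	 * enabled: memory decode and bus-master bits are clear. */
	uint16_t saved = 0x0100;

	/* The old code restored the snapshot verbatim; the fix ORs the
	 * enable bits back in so the windows are usable regardless of
	 * when the snapshot was taken. */
	uint16_t restored = saved | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER;

	printf("saved=0x%04x restored=0x%04x\n", saved, restored);
	return 0;
}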
@@ -553,12 +553,12 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300)
 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 	lbz	r0,HSTATE_HWTHREAD_STATE(r13)
 	cmpwi	r0,KVM_HWTHREAD_IN_KERNEL
-	beq	1f
+	beq	0f
 	li	r0,KVM_HWTHREAD_IN_KERNEL
 	stb	r0,HSTATE_HWTHREAD_STATE(r13)
 	/* Order setting hwthread_state vs. testing hwthread_req */
 	sync
-	lbz	r0,HSTATE_HWTHREAD_REQ(r13)
+0:	lbz	r0,HSTATE_HWTHREAD_REQ(r13)
 	cmpwi	r0,0
 	beq	1f
 	b	kvm_start_guest
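In words: the old code branched straight to the normal wakeup path (1f) whenever hwthread_state already said IN_KERNEL, skipping the hwthread_req test entirely, so a secondary thread could miss the request to enter a guest. A hypothetical C rendering of the fixed control flow (types and names are illustrative, not the kernel's):

#include <stdbool.h>
#include <stdio.h>

/* Illustrative states; the real values are the KVM_HWTHREAD_* constants
 * in arch/powerpc/include/asm/kvm_book3s_asm.h. */
enum hwthread_state { HWTHREAD_IN_IDLE, HWTHREAD_IN_KERNEL, HWTHREAD_IN_KVM };

/* Sketch of the fixed wakeup path: the hwthread_req test now runs even
 * when the state was already IN_KERNEL. */
static bool wakeup_should_start_guest(enum hwthread_state *state, int req)
{
	if (*state != HWTHREAD_IN_KERNEL) {
		*state = HWTHREAD_IN_KERNEL;
		/* the asm's "sync" sits here: order the state store
		 * against the load of hwthread_req below */
	}
	/* Before the fix, a state already equal to IN_KERNEL branched
	 * past this test (beq 1f), so a pending request to run a guest
	 * was never seen and the vcore locked up. */
	return req != 0;
}

int main(void)
{
	enum hwthread_state state = HWTHREAD_IN_KERNEL;
	printf("start guest: %d\n", wakeup_should_start_guest(&state, 1));
	return 0;
}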
@@ -890,6 +890,17 @@ static void __ref init_fallback_flush(void)
 		return;
 
 	l1d_size = ppc64_caches.l1d.size;
+
+	/*
+	 * If there is no d-cache-size property in the device tree, l1d_size
+	 * could be zero. That leads to the loop in the asm wrapping around to
+	 * 2^64-1, and then walking off the end of the fallback area and
+	 * eventually causing a page fault which is fatal. Just default to
+	 * something vaguely sane.
+	 */
+	if (!l1d_size)
+		l1d_size = (64 * 1024);
+
 	limit = min(ppc64_bolted_size(), ppc64_rma_size);
 
 	/*
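The added comment tells the whole story; as a minimal standalone illustration of the wraparound (plain C, 128-byte cache lines assumed, names made up):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical mirror of the count the fallback flush derives
	 * from the firmware-reported L1D size. */
	uint64_t l1d_size = 0;	/* no d-cache-size in the device tree */
	uint64_t nr_lines = l1d_size / 128;

	/* The asm loads the count into CTR and loops with bdnz, which
	 * decrements before testing, like a do/while: starting from 0
	 * it wraps to 2^64-1 and walks off the end of the flush area. */
	printf("zero count after one decrement: %" PRIu64 "\n",
	       nr_lines - 1);

	/* The fix: pick a vaguely sane default before deriving counts. */
	if (!l1d_size)
		l1d_size = 64 * 1024;
	printf("with 64K default: %" PRIu64 " lines\n", l1d_size / 128);
	return 0;
}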
@@ -55,7 +55,7 @@ static int patch_alt_instruction(unsigned int *src, unsigned int *dest,
 		unsigned int *target = (unsigned int *)branch_target(src);
 
 		/* Branch within the section doesn't need translating */
-		if (target < alt_start || target >= alt_end) {
+		if (target < alt_start || target > alt_end) {
 			instr = translate_branch(dest, src);
 			if (!instr)
 				return 1;
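Why ">" rather than ">=": a branch whose target is exactly alt_end points at the first instruction past the alternative section, and once the code is copied into place the unchanged relative offset already lands on the instruction following the patched region; translating it, as the old test did, instead redirects it back toward the out-of-line alternative section, which is not meant to be executed in place. A minimal sketch of just the boundary check (addresses made up):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative boundary check only; the names mirror the kernel code
 * but the section and addresses below are fabricated. */
static bool needs_translation(const uint32_t *target,
			      const uint32_t *alt_start,
			      const uint32_t *alt_end)
{
	/* A target of exactly alt_end is "one past the section" and the
	 * copied relative branch already lands in the right place, so it
	 * must NOT be translated; ">=" (the old code) wrongly caught it. */
	return target < alt_start || target > alt_end;
}

int main(void)
{
	uint32_t section[4];
	const uint32_t *alt_start = &section[0];
	const uint32_t *alt_end = &section[4];	/* one past the last insn */

	printf("target == alt_end needs translation? %d\n",
	       needs_translation(alt_end, alt_start, alt_end));
	return 0;
}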
@@ -389,6 +389,10 @@ static void xive_native_setup_cpu(unsigned int cpu, struct xive_cpu *xc)
 	if (xive_pool_vps == XIVE_INVALID_VP)
 		return;
 
+	/* Check if pool VP already active, if it is, pull it */
+	if (in_be32(xive_tima + TM_QW2_HV_POOL + TM_WORD2) & TM_QW2W2_VP)
+		in_be64(xive_tima + TM_SPC_PULL_POOL_CTX);
+
 	/* Enable the pool VP */
 	vp = xive_pool_vps + cpu;
 	pr_debug("CPU %d setting up pool VP 0x%x\n", cpu, vp);
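Design note: pushing a pool VP on a thread whose pool context is already marked valid is the out-of-spec behaviour the merge summary refers to, so before enabling the new VP the code now tests the valid bit in the TIMA pool context word and, if set, pulls the old context with a load from the pull-context offset. A minimal sketch of just the bit test (the register value is made up):

#include <stdint.h>
#include <stdio.h>

/* The real bit is TM_QW2W2_VP in arch/powerpc/include/asm/xive-regs.h;
 * it is the top bit of the big-endian pool context word. */
#define TM_QW2W2_VP 0x80000000u

int main(void)
{
	/* Pretend this was read with in_be32() from the pool context
	 * word (TM_QW2_HV_POOL + TM_WORD2); the value is fabricated. */
	uint32_t qw2w2 = 0x80000040u;

	/* The fix: if a pool VP is already valid, pull it (a load from
	 * TM_SPC_PULL_POOL_CTX in the real code) before pushing the new
	 * one, since pushing over an active VP is out of spec. */
	if (qw2w2 & TM_QW2W2_VP)
		printf("pool VP already active: pull before push\n");
	return 0;
}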