powerpc: Fix fatal SLB miss when restoring PPR

When restoring the PPR value, we incorrectly access the thread structure
at a time when MSR:RI is clear, which means we cannot recover from nested
faults. However, the thread structure isn't covered by the "bolted" SLB
entries, so accessing it can fault.

This fixes it by splitting the code so that the PPR value is loaded into
a GPR before MSR:RI is cleared.
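
To make the ordering concrete, here is a minimal before/after sketch of the
sequence (illustrative only, not the exact surrounding kernel code; the
register choices are arbitrary, and "mtmsrd rX,1" is used as the RI-clearing
step since the L=1 form updates only the EE and RI bits):

	/* Broken ordering: the loads run after RI has been cleared */
	li	r0,0
	mtmsrd	r0,1			/* clear MSR:EE and MSR:RI */
	ld	r4,PACACURRENT(r13)	/* PACA access: bolted, safe */
	ld	r4,TASKTHREADPPR(r4)	/* thread struct: not bolted, an SLB miss here is fatal */
	mtspr	SPRN_PPR,r4

	/* Fixed ordering: the loads run while RI is still set */
	ld	r2,PACACURRENT(r13)
	ld	r2,TASKTHREADPPR(r2)	/* an SLB miss here is recoverable */
	li	r0,0
	mtmsrd	r0,1			/* clear MSR:EE and MSR:RI */
	mtspr	SPRN_PPR,r2		/* register-only, cannot fault */

Only the mtspr has to run after RI is cleared, and it touches no memory, so
splitting the loads from the mtspr removes the window entirely.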

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
parent 36954dc7
@@ -406,13 +406,6 @@ BEGIN_FTR_SECTION_NESTED(945)						\
 	std	ra,TASKTHREADPPR(rb);					\
 END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,945)
 
-#define RESTORE_PPR(ra, rb)						\
-BEGIN_FTR_SECTION_NESTED(946)						\
-	ld	ra,PACACURRENT(r13);					\
-	ld	rb,TASKTHREADPPR(ra);					\
-	mtspr	SPRN_PPR,rb;	/* Restore PPR */			\
-END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,946)
-
 #endif
 
 /*
@@ -818,6 +818,12 @@ fast_exception_return:
 	andi.	r0,r3,MSR_RI
 	beq-	unrecov_restore
 
+	/* Load PPR from thread struct before we clear MSR:RI */
+BEGIN_FTR_SECTION
+	ld	r2,PACACURRENT(r13)
+	ld	r2,TASKTHREADPPR(r2)
+END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
+
 	/*
 	 * Clear RI before restoring r13. If we are returning to
 	 * userspace and we take an exception after restoring r13,
@@ -838,8 +844,10 @@ fast_exception_return:
 	 */
 	andi.	r0,r3,MSR_PR
 	beq	1f
+BEGIN_FTR_SECTION
+	mtspr	SPRN_PPR,r2	/* Restore PPR */
+END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
 	ACCOUNT_CPU_USER_EXIT(r2, r4)
-	RESTORE_PPR(r2, r4)
 	REST_GPR(13, r1)
 1:
 	mtspr	SPRN_SRR1,r3