Commit 2a28ab3d authored by David S. Miller, committed by Greg Kroah-Hartman

sparc64: Fix illegal relative branches in hypervisor patched TLB code.

[ Upstream commit b429ae4d ]

When we copy code over to patch another piece of code, we can only use
PC-relative branches that target code within that piece of code.

Such PC-relative branches cannot be made to external symbols because
the patch moves the location of the code and thus modifies the
relative address of external symbols.

Use an absolute jmpl to fix this problem.
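
To see why this matters, consider what the patching actually does: hypervisor_patch_cachetlbops (at the bottom of the diff) calls tlb_patch_one to copy the instruction words of each __hypervisor_* routine over the corresponding generic routine. Below is a minimal C sketch of that idea, using a hypothetical copy_insns helper rather than the kernel's actual tlb_patch_one, to illustrate why a PC-relative branch to an external symbol breaks after the copy while a sethi/jmpl pair does not.

#include <stdint.h>

/*
 * Illustrative sketch only -- not the kernel's tlb_patch_one.  The real
 * patching copies the 32-bit instruction words of a __hypervisor_* routine
 * verbatim over the generic routine (and flushes the I-cache).  Because a
 * SPARC branch encodes a displacement relative to the PC, the unchanged
 * displacement, interpreted at the new address, no longer reaches an
 * external target such as __hypervisor_tlb_tl0_error.  Branches to local
 * labels (1f, 2f, 3f) keep working, since source and target move together.
 * A sethi/jmpl pair encodes the absolute address and works at any location.
 */
static void copy_insns(uint32_t *dst, const uint32_t *src, unsigned int count)
{
	unsigned int i;

	for (i = 0; i < count; i++)
		dst[i] = src[i];	/* copy one instruction word verbatim */
}

This is also why the generic routines gain padding nops and the instruction counts passed to tlb_patch_one grow (10 to 19, 11 to 22, 16 to 27, 16 to 19): each hypervisor variant, now longer because of its sethi/jmpl error path, must still fit within the region it is copied over.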
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent f4fb552a
@@ -30,7 +30,7 @@
 	.text
 	.align		32
 	.globl		__flush_tlb_mm
-__flush_tlb_mm:		/* 18 insns */
+__flush_tlb_mm:		/* 19 insns */
 	/* %o0=(ctx & TAG_CONTEXT_BITS), %o1=SECONDARY_CONTEXT */
 	ldxa		[%o1] ASI_DMMU, %g2
 	cmp		%g2, %o0
@@ -81,7 +81,7 @@ __flush_tlb_page:	/* 22 insns */
 
 	.align		32
 	.globl		__flush_tlb_pending
-__flush_tlb_pending:	/* 26 insns */
+__flush_tlb_pending:	/* 27 insns */
 	/* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
 	rdpr		%pstate, %g7
 	sllx		%o1, 3, %o1
@@ -113,7 +113,7 @@ __flush_tlb_pending:	/* 26 insns */
 
 	.align		32
 	.globl		__flush_tlb_kernel_range
-__flush_tlb_kernel_range:	/* 16 insns */
+__flush_tlb_kernel_range:	/* 19 insns */
 	/* %o0=start, %o1=end */
 	cmp		%o0, %o1
 	be,pn		%xcc, 2f
@@ -131,6 +131,9 @@ __flush_tlb_kernel_range:	/* 16 insns */
 	retl
 	 nop
 	 nop
+	 nop
+	 nop
+	 nop
 
 __spitfire_flush_tlb_mm_slow:
 	rdpr		%pstate, %g1
@@ -309,19 +312,28 @@ __hypervisor_tlb_tl0_error:
 	ret
 	restore
 
-__hypervisor_flush_tlb_mm: /* 10 insns */
+__hypervisor_flush_tlb_mm: /* 19 insns */
 	mov		%o0, %o2	/* ARG2: mmu context */
 	mov		0, %o0		/* ARG0: CPU lists unimplemented */
 	mov		0, %o1		/* ARG1: CPU lists unimplemented */
 	mov		HV_MMU_ALL, %o3	/* ARG3: flags */
 	mov		HV_FAST_MMU_DEMAP_CTX, %o5
 	ta		HV_FAST_TRAP
-	brnz,pn		%o0, __hypervisor_tlb_tl0_error
+	brnz,pn		%o0, 1f
 	 mov		HV_FAST_MMU_DEMAP_CTX, %o1
 	retl
 	 nop
+1:	sethi		%hi(__hypervisor_tlb_tl0_error), %o5
+	jmpl		%o5 + %lo(__hypervisor_tlb_tl0_error), %g0
+	 nop
+	 nop
+	 nop
+	 nop
+	 nop
+	 nop
+	 nop
 
-__hypervisor_flush_tlb_page: /* 11 insns */
+__hypervisor_flush_tlb_page: /* 22 insns */
 	/* %o0 = context, %o1 = vaddr */
 	mov		%o0, %g2
 	mov		%o1, %o0	/* ARG0: vaddr + IMMU-bit */
@@ -330,10 +342,21 @@ __hypervisor_flush_tlb_page: /* 11 insns */
 	srlx		%o0, PAGE_SHIFT, %o0
 	sllx		%o0, PAGE_SHIFT, %o0
 	ta		HV_MMU_UNMAP_ADDR_TRAP
-	brnz,pn		%o0, __hypervisor_tlb_tl0_error
+	brnz,pn		%o0, 1f
 	 mov		HV_MMU_UNMAP_ADDR_TRAP, %o1
 	retl
 	 nop
+1:	sethi		%hi(__hypervisor_tlb_tl0_error), %o2
+	jmpl		%o2 + %lo(__hypervisor_tlb_tl0_error), %g0
+	 nop
+	 nop
+	 nop
+	 nop
+	 nop
+	 nop
+	 nop
+	 nop
+	 nop
 
 __hypervisor_flush_tlb_pending: /* 16 insns */
 	/* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
@@ -347,14 +370,25 @@ __hypervisor_flush_tlb_pending: /* 16 insns */
 	srlx		%o0, PAGE_SHIFT, %o0
 	sllx		%o0, PAGE_SHIFT, %o0
 	ta		HV_MMU_UNMAP_ADDR_TRAP
-	brnz,pn		%o0, __hypervisor_tlb_tl0_error
+	brnz,pn		%o0, 1f
 	 mov		HV_MMU_UNMAP_ADDR_TRAP, %o1
 	brnz,pt		%g1, 1b
 	 nop
 	retl
 	 nop
+1:	sethi		%hi(__hypervisor_tlb_tl0_error), %o2
+	jmpl		%o2 + %lo(__hypervisor_tlb_tl0_error), %g0
+	 nop
+	 nop
+	 nop
+	 nop
+	 nop
+	 nop
+	 nop
+	 nop
+	 nop
 
-__hypervisor_flush_tlb_kernel_range: /* 16 insns */
+__hypervisor_flush_tlb_kernel_range: /* 19 insns */
 	/* %o0=start, %o1=end */
 	cmp		%o0, %o1
 	be,pn		%xcc, 2f
@@ -366,12 +400,15 @@ __hypervisor_flush_tlb_kernel_range: /* 16 insns */
 	mov		0, %o1		/* ARG1: mmu context */
 	mov		HV_MMU_ALL, %o2	/* ARG2: flags */
 	ta		HV_MMU_UNMAP_ADDR_TRAP
-	brnz,pn		%o0, __hypervisor_tlb_tl0_error
+	brnz,pn		%o0, 3f
 	 mov		HV_MMU_UNMAP_ADDR_TRAP, %o1
 	brnz,pt		%g2, 1b
 	 sub		%g2, %g3, %g2
 2:	retl
 	 nop
+3:	sethi		%hi(__hypervisor_tlb_tl0_error), %o2
+	jmpl		%o2 + %lo(__hypervisor_tlb_tl0_error), %g0
+	 nop
 
 #ifdef DCACHE_ALIASING_POSSIBLE
 	/* XXX Niagara and friends have an 8K cache, so no aliasing is
@@ -819,28 +856,28 @@ hypervisor_patch_cachetlbops:
 	sethi		%hi(__hypervisor_flush_tlb_mm), %o1
 	or		%o1, %lo(__hypervisor_flush_tlb_mm), %o1
 	call		tlb_patch_one
-	 mov		10, %o2
+	 mov		19, %o2
 
 	sethi		%hi(__flush_tlb_page), %o0
 	or		%o0, %lo(__flush_tlb_page), %o0
 	sethi		%hi(__hypervisor_flush_tlb_page), %o1
 	or		%o1, %lo(__hypervisor_flush_tlb_page), %o1
 	call		tlb_patch_one
-	 mov		11, %o2
+	 mov		22, %o2
 
 	sethi		%hi(__flush_tlb_pending), %o0
 	or		%o0, %lo(__flush_tlb_pending), %o0
 	sethi		%hi(__hypervisor_flush_tlb_pending), %o1
 	or		%o1, %lo(__hypervisor_flush_tlb_pending), %o1
 	call		tlb_patch_one
-	 mov		16, %o2
+	 mov		27, %o2
 
 	sethi		%hi(__flush_tlb_kernel_range), %o0
 	or		%o0, %lo(__flush_tlb_kernel_range), %o0
 	sethi		%hi(__hypervisor_flush_tlb_kernel_range), %o1
 	or		%o1, %lo(__hypervisor_flush_tlb_kernel_range), %o1
 	call		tlb_patch_one
-	 mov		16, %o2
+	 mov		19, %o2
 
 #ifdef DCACHE_ALIASING_POSSIBLE
 	sethi		%hi(__flush_dcache_page), %o0
...