Commit 1b272275 authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-next

Pull sparc update from David Miller:

 1) Implement support for up to 47-bit physical addresses on sparc64.

 2) Support HAVE_CONTEXT_TRACKING on sparc64, from Kirill Tkhai.

 3) Fix Simba bridge window calculations, from Kjetil Oftedal.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-next:
  sparc64: Implement HAVE_CONTEXT_TRACKING
  sparc64: Add self-IPI support for smp_send_reschedule()
  sparc: PCI: Fix incorrect address calculation of PCI Bridge windows on Simba-bridges
  sparc64: Encode huge PMDs using PTE encoding.
  sparc64: Move to 64-bit PGDs and PMDs.
  sparc64: Move from 4MB to 8MB huge pages.
  sparc64: Make PAGE_OFFSET variable.
  sparc64: Fix inconsistent max-physical-address defines.
  sparc64: Document the shift counts used to validate linear kernel addresses.
  sparc64: Define PAGE_OFFSET in terms of physical address bits.
  sparc64: Use PAGE_OFFSET instead of a magic constant.
  sparc64: Clean up 64-bit mmap exclusion defines.
parents 91838e2d 812cb83a
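The centerpiece of the series is making PAGE_OFFSET a variable derived from the number of supported physical address bits instead of a hard-coded constant. A minimal standalone sketch of that arithmetic (ordinary userspace C, not kernel code; the macro is copied from the page_64.h hunk below):

#include <stdio.h>

#define PAGE_OFFSET_BY_BITS(X) (-(1UL << (X)))

int main(void)
{
	/* Old hard-coded value: -(2^43) == 0xfffff80000000000 */
	printf("43-bit PAGE_OFFSET: %016lx\n", PAGE_OFFSET_BY_BITS(43));
	/* New maximum supported: -(2^47) == 0xffff800000000000 */
	printf("47-bit PAGE_OFFSET: %016lx\n", PAGE_OFFSET_BY_BITS(47));
	return 0;
}

With 43 effective bits this reproduces the old constant; with the new 47-bit maximum it yields 0xffff800000000000.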
@@ -63,6 +63,7 @@ config SPARC64
 	select HAVE_DYNAMIC_FTRACE
 	select HAVE_FTRACE_MCOUNT_RECORD
 	select HAVE_SYSCALL_TRACEPOINTS
+	select HAVE_CONTEXT_TRACKING
 	select HAVE_DEBUG_KMEMLEAK
 	select RTC_DRV_CMOS
 	select RTC_DRV_BQ4802
...
@@ -93,7 +93,6 @@ typedef struct {
 	spinlock_t		lock;
 	unsigned long		sparc64_ctx_val;
 	unsigned long		huge_pte_count;
-	struct page		*pgtable_page;
 	struct tsb_config	tsb_block[MM_NUM_TSBS];
 	struct hv_tsb_descr	tsb_descr[MM_NUM_TSBS];
 } mm_context_t;
...
@@ -15,7 +15,10 @@
 #define DCACHE_ALIASING_POSSIBLE
 #endif
-#define HPAGE_SHIFT		22
+#define HPAGE_SHIFT		23
+#define REAL_HPAGE_SHIFT	22
+#define REAL_HPAGE_SIZE		(_AC(1,UL) << REAL_HPAGE_SHIFT)
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
 #define HPAGE_SIZE		(_AC(1,UL) << HPAGE_SHIFT)
@@ -53,8 +56,8 @@ extern void copy_user_page(void *to, void *from, unsigned long vaddr, struct pag
 /* These are used to make use of C type-checking.. */
 typedef struct { unsigned long pte; } pte_t;
 typedef struct { unsigned long iopte; } iopte_t;
-typedef struct { unsigned int pmd; } pmd_t;
-typedef struct { unsigned int pgd; } pgd_t;
+typedef struct { unsigned long pmd; } pmd_t;
+typedef struct { unsigned long pgd; } pgd_t;
 typedef struct { unsigned long pgprot; } pgprot_t;
 #define pte_val(x)	((x).pte)
@@ -73,8 +76,8 @@ typedef struct { unsigned long pgprot; } pgprot_t;
 /* .. while these make it easier on the compiler */
 typedef unsigned long pte_t;
 typedef unsigned long iopte_t;
-typedef unsigned int pmd_t;
-typedef unsigned int pgd_t;
+typedef unsigned long pmd_t;
+typedef unsigned long pgd_t;
 typedef unsigned long pgprot_t;
 #define pte_val(x)	(x)
@@ -93,18 +96,44 @@ typedef unsigned long pgprot_t;
 typedef pte_t *pgtable_t;
+/* These two values define the virtual address space range in which we
+ * must forbid 64-bit user processes from making mappings.  It used to
+ * represent precisely the virtual address space hole present in most
+ * early sparc64 chips including UltraSPARC-I.  But now it also is
+ * further constrained by the limits of our page tables, which is
+ * 43-bits of virtual address.
+ */
+#define SPARC64_VA_HOLE_TOP	_AC(0xfffffc0000000000,UL)
+#define SPARC64_VA_HOLE_BOTTOM	_AC(0x0000040000000000,UL)
+/* The next two defines specify the actual exclusion region we
+ * enforce, wherein we use a 4GB red zone on each side of the VA hole.
+ */
+#define VA_EXCLUDE_START	(SPARC64_VA_HOLE_BOTTOM - (1UL << 32UL))
+#define VA_EXCLUDE_END		(SPARC64_VA_HOLE_TOP + (1UL << 32UL))
 #define TASK_UNMAPPED_BASE	(test_thread_flag(TIF_32BIT) ? \
-				 (_AC(0x0000000070000000,UL)) : \
-				 (_AC(0xfffff80000000000,UL) + (1UL << 32UL)))
+				 _AC(0x0000000070000000,UL) : \
+				 VA_EXCLUDE_END)
 #include <asm-generic/memory_model.h>
+#define PAGE_OFFSET_BY_BITS(X)	(-(_AC(1,UL) << (X)))
+extern unsigned long PAGE_OFFSET;
 #endif /* !(__ASSEMBLY__) */
-/* We used to stick this into a hard-coded global register (%g4)
- * but that does not make sense anymore.
+/* The maximum number of physical memory address bits we support, this
+ * is used to size various tables used to manage kernel TLB misses and
+ * also the sparsemem code.
+ */
+#define MAX_PHYS_ADDRESS_BITS	47
+/* These two shift counts are used when indexing sparc64_valid_addr_bitmap
+ * and kpte_linear_bitmap.
  */
-#define PAGE_OFFSET		_AC(0xFFFFF80000000000,UL)
+#define ILOG2_4MB		22
+#define ILOG2_256MB		28
 #ifndef __ASSEMBLY__
...
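The red-zone defines above can be sanity-checked with ordinary arithmetic; a standalone sketch (not kernel code) printing the exclusion-window boundaries around the hardware VA hole:

#include <stdio.h>

#define SPARC64_VA_HOLE_TOP	0xfffffc0000000000UL
#define SPARC64_VA_HOLE_BOTTOM	0x0000040000000000UL

int main(void)
{
	/* 0x000003ff00000000: the last 4GB below the hole is off limits */
	printf("VA_EXCLUDE_START = %016lx\n",
	       SPARC64_VA_HOLE_BOTTOM - (1UL << 32));
	/* 0xfffffc0100000000: the first 4GB above the hole is off limits */
	printf("VA_EXCLUDE_END   = %016lx\n",
	       SPARC64_VA_HOLE_TOP + (1UL << 32));
	return 0;
}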
@@ -3,9 +3,11 @@
 #ifdef __KERNEL__
+#include <asm/page.h>
 #define SECTION_SIZE_BITS	30
-#define MAX_PHYSADDR_BITS	42
-#define MAX_PHYSMEM_BITS	42
+#define MAX_PHYSADDR_BITS	MAX_PHYS_ADDRESS_BITS
+#define MAX_PHYSMEM_BITS	MAX_PHYS_ADDRESS_BITS
 #endif /* !(__KERNEL__) */
...
@@ -192,7 +192,7 @@ register struct thread_info *current_thread_info_reg asm("g6");
 #define TIF_UNALIGNED		5	/* allowed to do unaligned accesses */
 /* flag bit 6 is available */
 #define TIF_32BIT		7	/* 32-bit binary */
-/* flag bit 8 is available */
+#define TIF_NOHZ		8	/* in adaptive nohz mode */
 #define TIF_SECCOMP		9	/* secure computing */
 #define TIF_SYSCALL_AUDIT	10	/* syscall auditing active */
 #define TIF_SYSCALL_TRACEPOINT	11	/* syscall tracepoint instrumentation */
@@ -210,6 +210,7 @@ register struct thread_info *current_thread_info_reg asm("g6");
 #define _TIF_NEED_RESCHED	(1<<TIF_NEED_RESCHED)
 #define _TIF_UNALIGNED		(1<<TIF_UNALIGNED)
 #define _TIF_32BIT		(1<<TIF_32BIT)
+#define _TIF_NOHZ		(1<<TIF_NOHZ)
 #define _TIF_SECCOMP		(1<<TIF_SECCOMP)
 #define _TIF_SYSCALL_AUDIT	(1<<TIF_SYSCALL_AUDIT)
 #define _TIF_SYSCALL_TRACEPOINT	(1<<TIF_SYSCALL_TRACEPOINT)
...
@@ -142,98 +142,39 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
 	or	REG1, %lo(swapper_pg_dir), REG1; \
 	sllx	VADDR, 64 - (PGDIR_SHIFT + PGDIR_BITS), REG2; \
 	srlx	REG2, 64 - PAGE_SHIFT, REG2; \
-	andn	REG2, 0x3, REG2; \
-	lduw	[REG1 + REG2], REG1; \
+	andn	REG2, 0x7, REG2; \
+	ldx	[REG1 + REG2], REG1; \
 	brz,pn	REG1, FAIL_LABEL; \
 	sllx	VADDR, 64 - (PMD_SHIFT + PMD_BITS), REG2; \
 	srlx	REG2, 64 - PAGE_SHIFT, REG2; \
-	sllx	REG1, PGD_PADDR_SHIFT, REG1; \
-	andn	REG2, 0x3, REG2; \
-	lduwa	[REG1 + REG2] ASI_PHYS_USE_EC, REG1; \
+	andn	REG2, 0x7, REG2; \
+	ldxa	[REG1 + REG2] ASI_PHYS_USE_EC, REG1; \
 	brz,pn	REG1, FAIL_LABEL; \
 	sllx	VADDR, 64 - PMD_SHIFT, REG2; \
-	srlx	REG2, 64 - (PAGE_SHIFT - 1), REG2; \
-	sllx	REG1, PMD_PADDR_SHIFT, REG1; \
+	srlx	REG2, 64 - PAGE_SHIFT, REG2; \
 	andn	REG2, 0x7, REG2; \
 	add	REG1, REG2, REG1;
-/* These macros exists only to make the PMD translator below
- * easier to read.  It hides the ELF section switch for the
- * sun4v code patching.
- */
-#define OR_PTE_BIT_1INSN(REG, NAME) \
-661:	or	REG, _PAGE_##NAME##_4U, REG; \
-	.section	.sun4v_1insn_patch, "ax"; \
-	.word	661b; \
-	or	REG, _PAGE_##NAME##_4V, REG; \
-	.previous;
-#define OR_PTE_BIT_2INSN(REG, TMP, NAME) \
-661:	sethi	%hi(_PAGE_##NAME##_4U), TMP; \
-	or	REG, TMP, REG; \
-	.section	.sun4v_2insn_patch, "ax"; \
-	.word	661b; \
-	mov	-1, TMP; \
-	or	REG, _PAGE_##NAME##_4V, REG; \
-	.previous;
-/* Load into REG the PTE value for VALID, CACHE, and SZHUGE. */
-#define BUILD_PTE_VALID_SZHUGE_CACHE(REG) \
-661:	sethi	%uhi(_PAGE_VALID|_PAGE_SZHUGE_4U), REG; \
-	.section	.sun4v_1insn_patch, "ax"; \
-	.word	661b; \
-	sethi	%uhi(_PAGE_VALID), REG; \
-	.previous; \
-	sllx	REG, 32, REG; \
-661:	or	REG, _PAGE_CP_4U|_PAGE_CV_4U, REG; \
-	.section	.sun4v_1insn_patch, "ax"; \
-	.word	661b; \
-	or	REG, _PAGE_CP_4V|_PAGE_CV_4V|_PAGE_SZHUGE_4V, REG; \
-	.previous;
 /* PMD has been loaded into REG1, interpret the value, seeing
  * if it is a HUGE PMD or a normal one.  If it is not valid
  * then jump to FAIL_LABEL.  If it is a HUGE PMD, and it
  * translates to a valid PTE, branch to PTE_LABEL.
  *
- * We translate the PMD by hand, one bit at a time,
- * constructing the huge PTE.
- *
- * So we construct the PTE in REG2 as follows:
- *
- * 1) Extract the PMD PFN from REG1 and place it into REG2.
- *
- * 2) Translate PMD protection bits in REG1 into REG2, one bit
- *    at a time using andcc tests on REG1 and OR's into REG2.
- *
- *    Only two bits to be concerned with here, EXEC and WRITE.
- *    Now REG1 is freed up and we can use it as a temporary.
- *
- * 3) Construct the VALID, CACHE, and page size PTE bits in
- *    REG1, OR with REG2 to form final PTE.
+ * We have to propagate the 4MB bit of the virtual address
+ * because we are fabricating 8MB pages using 4MB hw pages.
  */
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 #define USER_PGTABLE_CHECK_PMD_HUGE(VADDR, REG1, REG2, FAIL_LABEL, PTE_LABEL) \
 	brz,pn	REG1, FAIL_LABEL; \
-	andcc	REG1, PMD_ISHUGE, %g0; \
-	be,pt	%xcc, 700f; \
-	and	REG1, PMD_HUGE_PRESENT|PMD_HUGE_ACCESSED, REG2; \
-	cmp	REG2, PMD_HUGE_PRESENT|PMD_HUGE_ACCESSED; \
-	bne,pn	%xcc, FAIL_LABEL; \
-	andn	REG1, PMD_HUGE_PROTBITS, REG2; \
-	sllx	REG2, PMD_PADDR_SHIFT, REG2; \
-	/* REG2 now holds PFN << PAGE_SHIFT */ \
-	andcc	REG1, PMD_HUGE_WRITE, %g0; \
-	bne,a,pt	%xcc, 1f; \
-	OR_PTE_BIT_1INSN(REG2, W); \
-1:	andcc	REG1, PMD_HUGE_EXEC, %g0; \
-	be,pt	%xcc, 1f; \
-	nop; \
-	OR_PTE_BIT_2INSN(REG2, REG1, EXEC); \
-	/* REG1 can now be clobbered, build final PTE */ \
-1:	BUILD_PTE_VALID_SZHUGE_CACHE(REG1); \
-	ba,pt	%xcc, PTE_LABEL; \
-	or	REG1, REG2, REG1; \
+	sethi	%uhi(_PAGE_PMD_HUGE), REG2; \
+	sllx	REG2, 32, REG2; \
+	andcc	REG1, REG2, %g0; \
+	be,pt	%xcc, 700f; \
+	sethi	%hi(4 * 1024 * 1024), REG2; \
+	andn	REG1, REG2, REG1; \
+	and	VADDR, REG2, REG2; \
+	brlz,pt	REG1, PTE_LABEL; \
+	or	REG1, REG2, REG1; \
 700:
 #else
 #define USER_PGTABLE_CHECK_PMD_HUGE(VADDR, REG1, REG2, FAIL_LABEL, PTE_LABEL) \
@@ -253,18 +194,16 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
 #define USER_PGTABLE_WALK_TL1(VADDR, PHYS_PGD, REG1, REG2, FAIL_LABEL) \
 	sllx	VADDR, 64 - (PGDIR_SHIFT + PGDIR_BITS), REG2; \
 	srlx	REG2, 64 - PAGE_SHIFT, REG2; \
-	andn	REG2, 0x3, REG2; \
-	lduwa	[PHYS_PGD + REG2] ASI_PHYS_USE_EC, REG1; \
+	andn	REG2, 0x7, REG2; \
+	ldxa	[PHYS_PGD + REG2] ASI_PHYS_USE_EC, REG1; \
 	brz,pn	REG1, FAIL_LABEL; \
 	sllx	VADDR, 64 - (PMD_SHIFT + PMD_BITS), REG2; \
 	srlx	REG2, 64 - PAGE_SHIFT, REG2; \
-	sllx	REG1, PGD_PADDR_SHIFT, REG1; \
-	andn	REG2, 0x3, REG2; \
-	lduwa	[REG1 + REG2] ASI_PHYS_USE_EC, REG1; \
+	andn	REG2, 0x7, REG2; \
+	ldxa	[REG1 + REG2] ASI_PHYS_USE_EC, REG1; \
 	USER_PGTABLE_CHECK_PMD_HUGE(VADDR, REG1, REG2, FAIL_LABEL, 800f) \
 	sllx	VADDR, 64 - PMD_SHIFT, REG2; \
-	srlx	REG2, 64 - (PAGE_SHIFT - 1), REG2; \
-	sllx	REG1, PMD_PADDR_SHIFT, REG1; \
+	srlx	REG2, 64 - PAGE_SHIFT, REG2; \
 	andn	REG2, 0x7, REG2; \
 	add	REG1, REG2, REG1; \
 	ldxa	[REG1] ASI_PHYS_USE_EC, REG1; \
...
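The rewritten USER_PGTABLE_CHECK_PMD_HUGE above fabricates a PTE for one half of an 8MB software huge page directly from the PMD: it clears the 4MB bit of the cached PTE value and ORs in bit 22 of the faulting virtual address, since each 8MB page is backed by two 4MB hardware TLB entries. A standalone sketch of that bit manipulation (illustrative values, not kernel data):

#include <stdio.h>

#define HW_4MB (4UL * 1024 * 1024)

static unsigned long fabricate_pte(unsigned long pmd_pte, unsigned long vaddr)
{
	/* Clear the 4MB bit of the base PTE, then copy it from the VA. */
	return (pmd_pte & ~HW_4MB) | (vaddr & HW_4MB);
}

int main(void)
{
	unsigned long pmd_pte = 0x8000000001200000UL;	/* made-up PTE value */

	/* First 4MB half of the 8MB page: bit 22 of the VA is clear. */
	printf("%016lx\n", fabricate_pte(pmd_pte, 0x70000000000UL));
	/* Second half: VA bit 22 is set, so the PA gains 4MB as well. */
	printf("%016lx\n", fabricate_pte(pmd_pte, 0x70000400000UL));
	return 0;
}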
@@ -88,7 +88,6 @@ extern asmlinkage void syscall_trace_leave(struct pt_regs *regs);
 extern void bad_trap_tl1(struct pt_regs *regs, long lvl);
-extern void do_fpe_common(struct pt_regs *regs);
 extern void do_fpieee(struct pt_regs *regs);
 extern void do_fpother(struct pt_regs *regs);
 extern void do_tof(struct pt_regs *regs);
...
@@ -159,11 +159,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
 asmlinkage void kgdb_trap(unsigned long trap_level, struct pt_regs *regs)
 {
+	enum ctx_state prev_state = exception_enter();
 	unsigned long flags;
 	if (user_mode(regs)) {
 		bad_trap(regs, trap_level);
-		return;
+		goto out;
 	}
 	flushw_all();
@@ -171,6 +172,8 @@ asmlinkage void kgdb_trap(unsigned long trap_level, struct pt_regs *regs)
 	local_irq_save(flags);
 	kgdb_handle_exception(0x172, SIGTRAP, 0, regs);
 	local_irq_restore(flags);
+out:
+	exception_exit(prev_state);
 }
 int kgdb_arch_init(void)
...
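The kgdb change above is the template for every trap handler touched by the context-tracking patch: exception_enter() on entry, exception_exit() on every exit path, with early returns converted to goto out. A runnable userspace mock of the pattern (the real enum ctx_state and helpers live in linux/context_tracking.h; this is a sketch, not kernel source):

#include <stdio.h>

enum ctx_state { CONTEXT_KERNEL, CONTEXT_USER };
static enum ctx_state cur_state = CONTEXT_USER;

static enum ctx_state exception_enter(void)
{
	enum ctx_state prev = cur_state;
	cur_state = CONTEXT_KERNEL;	/* the handler runs in kernel context */
	return prev;
}

static void exception_exit(enum ctx_state prev)
{
	cur_state = prev;		/* back to whatever we interrupted */
}

static void trap_handler(int from_user)
{
	enum ctx_state prev_state = exception_enter();

	if (from_user) {
		printf("bad trap from user\n");
		goto out;		/* was a bare 'return' pre-patch */
	}
	printf("kernel-mode trap handled\n");
out:
	exception_exit(prev_state);
}

int main(void)
{
	trap_handler(1);
	trap_handler(0);
	return 0;
}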
@@ -8,6 +8,7 @@
 #include <linux/module.h>
 #include <linux/kdebug.h>
 #include <linux/slab.h>
+#include <linux/context_tracking.h>
 #include <asm/signal.h>
 #include <asm/cacheflush.h>
 #include <asm/uaccess.h>
@@ -418,12 +419,14 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
 asmlinkage void __kprobes kprobe_trap(unsigned long trap_level,
 				      struct pt_regs *regs)
 {
+	enum ctx_state prev_state = exception_enter();
 	BUG_ON(trap_level != 0x170 && trap_level != 0x171);
 	if (user_mode(regs)) {
 		local_irq_enable();
 		bad_trap(regs, trap_level);
-		return;
+		goto out;
 	}
 	/* trap_level == 0x170 --> ta 0x70
@@ -433,6 +436,8 @@ asmlinkage void __kprobes kprobe_trap(unsigned long trap_level,
 	    (trap_level == 0x170) ? "debug" : "debug_2",
 	    regs, 0, trap_level, SIGTRAP) != NOTIFY_STOP)
 		bad_trap(regs, trap_level);
+out:
+	exception_exit(prev_state);
 }
 /* Jprobes support. */
...
@@ -153,12 +153,19 @@ kvmap_dtlb_tsb4m_miss:
 	/* Clear the PAGE_OFFSET top virtual bits, shift
 	 * down to get PFN, and make sure PFN is in range.
 	 */
-	sllx	%g4, 21, %g5
+661:	sllx	%g4, 0, %g5
+	.section	.page_offset_shift_patch, "ax"
+	.word	661b
+	.previous
 	/* Check to see if we know about valid memory at the 4MB
 	 * chunk this physical address will reside within.
 	 */
-	srlx	%g5, 21 + 41, %g2
+661:	srlx	%g5, MAX_PHYS_ADDRESS_BITS, %g2
+	.section	.page_offset_shift_patch, "ax"
+	.word	661b
+	.previous
 	brnz,pn	%g2, kvmap_dtlb_longpath
 	 nop
@@ -176,7 +183,11 @@ valid_addr_bitmap_patch:
 	or	%g7, %lo(sparc64_valid_addr_bitmap), %g7
 	.previous
-	srlx	%g5, 21 + 22, %g2
+661:	srlx	%g5, ILOG2_4MB, %g2
+	.section	.page_offset_shift_patch, "ax"
+	.word	661b
+	.previous
 	srlx	%g2, 6, %g5
 	and	%g2, 63, %g2
 	sllx	%g5, 3, %g5
@@ -189,9 +200,18 @@ valid_addr_bitmap_patch:
 2:	sethi	%hi(kpte_linear_bitmap), %g2
 	/* Get the 256MB physical address index. */
-	sllx	%g4, 21, %g5
+661:	sllx	%g4, 0, %g5
+	.section	.page_offset_shift_patch, "ax"
+	.word	661b
+	.previous
 	or	%g2, %lo(kpte_linear_bitmap), %g2
-	srlx	%g5, 21 + 28, %g5
+661:	srlx	%g5, ILOG2_256MB, %g5
+	.section	.page_offset_shift_patch, "ax"
+	.word	661b
+	.previous
 	and	%g5, (32 - 1), %g7
 	/* Divide by 32 to get the offset into the bitmask. */
...
@@ -398,8 +398,8 @@ static void apb_fake_ranges(struct pci_dev *dev,
 	apb_calc_first_last(map, &first, &last);
 	res = bus->resource[1];
 	res->flags = IORESOURCE_MEM;
-	region.start = (first << 21);
-	region.end = (last << 21) + ((1 << 21) - 1);
+	region.start = (first << 29);
+	region.end = (last << 29) + ((1 << 29) - 1);
 	pcibios_bus_to_resource(dev, res, &region);
 }
...
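The corrected shift of 29 sizes each map bit at a 512MB chunk (8 bits x 512MB covering the 4GB PCI memory space); the old shift of 21 sized the windows in 2MB units, 256 times too small. A quick standalone check of the corrected arithmetic (first and last are hypothetical map indices, not values from the commit):

#include <stdio.h>

int main(void)
{
	unsigned long first = 1, last = 2;

	/* 0x20000000 .. 0x5fffffff: two contiguous 512MB chunks */
	printf("start = %08lx\n", first << 29);
	printf("end   = %08lx\n", (last << 29) + ((1UL << 29) - 1));
	return 0;
}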
@@ -31,6 +31,7 @@
 #include <linux/elfcore.h>
 #include <linux/sysrq.h>
 #include <linux/nmi.h>
+#include <linux/context_tracking.h>
 #include <asm/uaccess.h>
 #include <asm/page.h>
@@ -557,6 +558,7 @@ void fault_in_user_windows(void)
 barf:
 	set_thread_wsaved(window + 1);
+	user_exit();
 	do_exit(SIGILL);
 }
...
@@ -27,6 +27,7 @@
 #include <trace/syscall.h>
 #include <linux/compat.h>
 #include <linux/elf.h>
+#include <linux/context_tracking.h>
 #include <asm/asi.h>
 #include <asm/pgtable.h>
@@ -1066,6 +1067,9 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
 	/* do the secure computing check first */
 	secure_computing_strict(regs->u_regs[UREG_G1]);
+	if (test_thread_flag(TIF_NOHZ))
+		user_exit();
 	if (test_thread_flag(TIF_SYSCALL_TRACE))
 		ret = tracehook_report_syscall_entry(regs);
@@ -1086,6 +1090,9 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
 asmlinkage void syscall_trace_leave(struct pt_regs *regs)
 {
+	if (test_thread_flag(TIF_NOHZ))
+		user_exit();
 	audit_syscall_exit(regs);
 	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
@@ -1093,4 +1100,7 @@ asmlinkage void syscall_trace_leave(struct pt_regs *regs)
 	if (test_thread_flag(TIF_SYSCALL_TRACE))
 		tracehook_report_syscall_exit(regs, 0);
+	if (test_thread_flag(TIF_NOHZ))
+		user_enter();
 }
@@ -18,10 +18,16 @@
 #define	RTRAP_PSTATE_IRQOFF	(PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV)
 #define	RTRAP_PSTATE_AG_IRQOFF	(PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_AG)
+#ifdef CONFIG_CONTEXT_TRACKING
+# define SCHEDULE_USER schedule_user
+#else
+# define SCHEDULE_USER schedule
+#endif
 	.text
 	.align	32
 __handle_preemption:
-	call	schedule
+	call	SCHEDULE_USER
 	wrpr	%g0, RTRAP_PSTATE, %pstate
 	ba,pt	%xcc, __handle_preemption_continue
 	wrpr	%g0, RTRAP_PSTATE_IRQOFF, %pstate
...
@@ -23,6 +23,7 @@
 #include <linux/tty.h>
 #include <linux/binfmts.h>
 #include <linux/bitops.h>
+#include <linux/context_tracking.h>
 #include <asm/uaccess.h>
 #include <asm/ptrace.h>
@@ -43,6 +44,7 @@ asmlinkage void sparc64_set_context(struct pt_regs *regs)
 {
 	struct ucontext __user *ucp = (struct ucontext __user *)
 		regs->u_regs[UREG_I0];
+	enum ctx_state prev_state = exception_enter();
 	mc_gregset_t __user *grp;
 	unsigned long pc, npc, tstate;
 	unsigned long fp, i7;
@@ -129,16 +131,19 @@ asmlinkage void sparc64_set_context(struct pt_regs *regs)
 	}
 	if (err)
 		goto do_sigsegv;
+out:
+	exception_exit(prev_state);
 	return;
 do_sigsegv:
 	force_sig(SIGSEGV, current);
+	goto out;
 }
 asmlinkage void sparc64_get_context(struct pt_regs *regs)
 {
 	struct ucontext __user *ucp = (struct ucontext __user *)
 		regs->u_regs[UREG_I0];
+	enum ctx_state prev_state = exception_enter();
 	mc_gregset_t __user *grp;
 	mcontext_t __user *mcp;
 	unsigned long fp, i7;
@@ -220,10 +225,12 @@ asmlinkage void sparc64_get_context(struct pt_regs *regs)
 	}
 	if (err)
 		goto do_sigsegv;
+out:
+	exception_exit(prev_state);
 	return;
 do_sigsegv:
 	force_sig(SIGSEGV, current);
+	goto out;
 }
 struct rt_signal_frame {
@@ -528,11 +535,13 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
 void do_notify_resume(struct pt_regs *regs, unsigned long orig_i0, unsigned long thread_info_flags)
 {
+	user_exit();
 	if (thread_info_flags & _TIF_SIGPENDING)
 		do_signal(regs, orig_i0);
 	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
 		clear_thread_flag(TIF_NOTIFY_RESUME);
 		tracehook_notify_resume(regs);
 	}
+	user_enter();
 }
@@ -1399,8 +1399,13 @@ void __init smp_cpus_done(unsigned int max_cpus)
 void smp_send_reschedule(int cpu)
 {
-	xcall_deliver((u64) &xcall_receive_signal, 0, 0,
-		      cpumask_of(cpu));
+	if (cpu == smp_processor_id()) {
+		WARN_ON_ONCE(preemptible());
+		set_softint(1 << PIL_SMP_RECEIVE_SIGNAL);
+	} else {
+		xcall_deliver((u64) &xcall_receive_signal,
+			      0, 0, cpumask_of(cpu));
+	}
 }
 void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs)
...
@@ -182,7 +182,7 @@ sun4v_tsb_miss_common:
 	cmp	%g5, -1
 	be,pt	%xcc, 80f
 	 nop
-	COMPUTE_TSB_PTR(%g5, %g4, HPAGE_SHIFT, %g2, %g7)
+	COMPUTE_TSB_PTR(%g5, %g4, REAL_HPAGE_SHIFT, %g2, %g7)
 	/* That clobbered %g2, reload it. */
 	ldxa	[%g0] ASI_SCRATCHPAD, %g2
...
@@ -24,6 +24,7 @@
 #include <linux/personality.h>
 #include <linux/random.h>
 #include <linux/export.h>
+#include <linux/context_tracking.h>
 #include <asm/uaccess.h>
 #include <asm/utrap.h>
@@ -39,9 +40,6 @@ asmlinkage unsigned long sys_getpagesize(void)
 	return PAGE_SIZE;
 }
-#define VA_EXCLUDE_START (0x0000080000000000UL - (1UL << 32UL))
-#define VA_EXCLUDE_END   (0xfffff80000000000UL + (1UL << 32UL))
 /* Does addr --> addr+len fall within 4GB of the VA-space hole or
  * overflow past the end of the 64-bit address space?
  */
@@ -499,6 +497,7 @@ asmlinkage unsigned long c_sys_nis_syscall(struct pt_regs *regs)
 asmlinkage void sparc_breakpoint(struct pt_regs *regs)
 {
+	enum ctx_state prev_state = exception_enter();
 	siginfo_t info;
 	if (test_thread_flag(TIF_32BIT)) {
@@ -517,6 +516,7 @@ asmlinkage void sparc_breakpoint(struct pt_regs *regs)
 #ifdef DEBUG_SPARC_BREAKPOINT
 	printk ("TRAP: Returning to space: PC=%lx nPC=%lx\n", regs->tpc, regs->tnpc);
 #endif
+	exception_exit(prev_state);
 }
 extern void check_pending(int signum);
...
@@ -52,7 +52,7 @@ sys32_rt_sigreturn:
 #endif
 	.align	32
 1:	ldx	[%g6 + TI_FLAGS], %l5
-	andcc	%l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
+	andcc	%l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
 	be,pt	%icc, rtrap
 	 nop
 	call	syscall_trace_leave
@@ -184,7 +184,7 @@ linux_sparc_syscall32:
 	srl	%i3, 0, %o3				! IEU0
 	srl	%i2, 0, %o2				! IEU0	Group
-	andcc	%l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
+	andcc	%l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
 	bne,pn	%icc, linux_syscall_trace32		! CTI
 	 mov	%i0, %l5				! IEU1
 5:	call	%l7					! CTI	Group brk forced
@@ -207,7 +207,7 @@ linux_sparc_syscall:
 	mov	%i3, %o3				! IEU1
 	mov	%i4, %o4				! IEU0	Group
-	andcc	%l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
+	andcc	%l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
 	bne,pn	%icc, linux_syscall_trace		! CTI	Group
 	 mov	%i0, %l5				! IEU0
 2:	call	%l7					! CTI	Group brk forced
@@ -223,7 +223,7 @@ ret_sys_call:
 	cmp	%o0, -ERESTART_RESTARTBLOCK
 	bgeu,pn	%xcc, 1f
-	 andcc	%l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
+	 andcc	%l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
 	ldx	[%sp + PTREGS_OFF + PT_V9_TNPC], %l1	! pc = npc
 2:
...
@@ -75,7 +75,7 @@ tsb_miss_page_table_walk:
 	mov	512, %g7
 	andn	%g5, 0x7, %g5
 	sllx	%g7, %g6, %g7
-	srlx	%g4, HPAGE_SHIFT, %g6
+	srlx	%g4, REAL_HPAGE_SHIFT, %g6
 	sub	%g7, 1, %g7
 	and	%g6, %g7, %g6
 	sllx	%g6, 4, %g6
...
@@ -21,9 +21,12 @@
 #include <linux/bitops.h>
 #include <linux/perf_event.h>
 #include <linux/ratelimit.h>
+#include <linux/context_tracking.h>
 #include <asm/fpumacro.h>
 #include <asm/cacheflush.h>
+#include "entry.h"
 enum direction {
 	load,	/* ld, ldd, ldh, ldsh */
 	store,	/* st, std, sth, stsh */
@@ -418,9 +421,6 @@ int handle_popc(u32 insn, struct pt_regs *regs)
 extern void do_fpother(struct pt_regs *regs);
 extern void do_privact(struct pt_regs *regs);
-extern void spitfire_data_access_exception(struct pt_regs *regs,
-					   unsigned long sfsr,
-					   unsigned long sfar);
 extern void sun4v_data_access_exception(struct pt_regs *regs,
 					unsigned long addr,
 					unsigned long type_ctx);
@@ -578,6 +578,7 @@ void handle_ld_nf(u32 insn, struct pt_regs *regs)
 void handle_lddfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
 {
+	enum ctx_state prev_state = exception_enter();
 	unsigned long pc = regs->tpc;
 	unsigned long tstate = regs->tstate;
 	u32 insn;
@@ -632,13 +633,16 @@ void handle_lddfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr
 			sun4v_data_access_exception(regs, sfar, sfsr);
 		else
 			spitfire_data_access_exception(regs, sfsr, sfar);
-		return;
+		goto out;
 	}
 	advance(regs);
+out:
+	exception_exit(prev_state);
 }
 void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
 {
+	enum ctx_state prev_state = exception_enter();
 	unsigned long pc = regs->tpc;
 	unsigned long tstate = regs->tstate;
 	u32 insn;
@@ -680,7 +684,9 @@ void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr
 			sun4v_data_access_exception(regs, sfar, sfsr);
 		else
 			spitfire_data_access_exception(regs, sfsr, sfar);
-		return;
+		goto out;
 	}
 	advance(regs);
+out:
+	exception_exit(prev_state);
 }
@@ -122,6 +122,11 @@ SECTIONS
 		*(.swapper_4m_tsb_phys_patch)
 		__swapper_4m_tsb_phys_patch_end = .;
 	}
+	.page_offset_shift_patch : {
+		__page_offset_shift_patch = .;
+		*(.page_offset_shift_patch)
+		__page_offset_shift_patch_end = .;
+	}
 	.popc_3insn_patch : {
 		__popc_3insn_patch = .;
 		*(.popc_3insn_patch)
...
@@ -37,10 +37,10 @@ _clear_page:	/* %o0=dest */
 	.globl	clear_user_page
 clear_user_page:	/* %o0=dest, %o1=vaddr */
 	lduw	[%g6 + TI_PRE_COUNT], %o2
-	sethi	%uhi(PAGE_OFFSET), %g2
+	sethi	%hi(PAGE_OFFSET), %g2
 	sethi	%hi(PAGE_SIZE), %o4
-	sllx	%g2, 32, %g2
+	ldx	[%g2 + %lo(PAGE_OFFSET)], %g2
 	sethi	%hi(PAGE_KERNEL_LOCKED), %g3
 	ldx	[%g3 + %lo(PAGE_KERNEL_LOCKED)], %g3
...
@@ -46,10 +46,10 @@
 	.type	copy_user_page,#function
 copy_user_page:	/* %o0=dest, %o1=src, %o2=vaddr */
 	lduw	[%g6 + TI_PRE_COUNT], %o4
-	sethi	%uhi(PAGE_OFFSET), %g2
+	sethi	%hi(PAGE_OFFSET), %g2
 	sethi	%hi(PAGE_SIZE), %o3
-	sllx	%g2, 32, %g2
+	ldx	[%g2 + %lo(PAGE_OFFSET)], %g2
 	sethi	%hi(PAGE_KERNEL_LOCKED), %g3
 	ldx	[%g3 + %lo(PAGE_KERNEL_LOCKED)], %g3
...
@@ -21,6 +21,7 @@
 #include <linux/kprobes.h>
 #include <linux/kdebug.h>
 #include <linux/percpu.h>
+#include <linux/context_tracking.h>
 #include <asm/page.h>
 #include <asm/pgtable.h>
@@ -272,6 +273,7 @@ static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
 {
+	enum ctx_state prev_state = exception_enter();
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma;
 	unsigned int insn = 0;
@@ -282,7 +284,7 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
 	fault_code = get_thread_fault_code();
 	if (notify_page_fault(regs))
-		return;
+		goto exit_exception;
 	si_code = SEGV_MAPERR;
 	address = current_thread_info()->fault_address;
@@ -313,7 +315,7 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
 			/* Valid, no problems... */
 		} else {
 			bad_kernel_pc(regs, address);
-			return;
+			goto exit_exception;
 		}
 	} else
 		flags |= FAULT_FLAG_USER;
@@ -430,7 +432,7 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
 	fault = handle_mm_fault(mm, vma, address, flags);
 	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
-		return;
+		goto exit_exception;
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
@@ -482,6 +484,8 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
 	}
 #endif
+exit_exception:
+	exception_exit(prev_state);
 	return;
 	/*
@@ -494,7 +498,7 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
 handle_kernel_fault:
 	do_kernel_fault(regs, si_code, fault_code, insn, address);
-	return;
+	goto exit_exception;
 	/*
 	 * We ran out of memory, or some other thing happened to us that made
@@ -505,7 +509,7 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
 		up_read(&mm->mmap_sem);
 		if (!(regs->tstate & TSTATE_PRIV)) {
 			pagefault_out_of_memory();
-			return;
+			goto exit_exception;
 		}
 		goto handle_kernel_fault;
...
@@ -71,13 +71,12 @@ static int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
 			int *nr)
 {
 	struct page *head, *page, *tail;
-	u32 mask;
 	int refs;
-	mask = PMD_HUGE_PRESENT;
-	if (write)
-		mask |= PMD_HUGE_WRITE;
-	if ((pmd_val(pmd) & mask) != mask)
+	if (!pmd_large(pmd))
+		return 0;
+	if (write && !pmd_write(pmd))
 		return 0;
 	refs = 0;
...
@@ -21,8 +21,6 @@
 /* Slightly simplified from the non-hugepage variant because by
  * definition we don't have to worry about any page coloring stuff
  */
-#define VA_EXCLUDE_START (0x0000080000000000UL - (1UL << 32UL))
-#define VA_EXCLUDE_END   (0xfffff80000000000UL + (1UL << 32UL))
 static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
 							unsigned long addr,
...
 #ifndef _SPARC64_MM_INIT_H
 #define _SPARC64_MM_INIT_H
+#include <asm/page.h>
 /* Most of the symbols in this file are defined in init.c and
  * marked non-static so that assembler code can get at them.
  */
-#define MAX_PHYS_ADDRESS	(1UL << 41UL)
+#define MAX_PHYS_ADDRESS	(1UL << MAX_PHYS_ADDRESS_BITS)
 #define KPTE_BITMAP_CHUNK_SZ	(256UL * 1024UL * 1024UL)
 #define KPTE_BITMAP_BYTES	\
 	((MAX_PHYS_ADDRESS / KPTE_BITMAP_CHUNK_SZ) / 4)
...
@@ -161,8 +161,8 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
 	if (mm == &init_mm)
 		return;
-	if ((pmd_val(pmd) ^ pmd_val(orig)) & PMD_ISHUGE) {
-		if (pmd_val(pmd) & PMD_ISHUGE)
+	if ((pmd_val(pmd) ^ pmd_val(orig)) & _PAGE_PMD_HUGE) {
+		if (pmd_val(pmd) & _PAGE_PMD_HUGE)
 			mm->context.huge_pte_count++;
 		else
 			mm->context.huge_pte_count--;
@@ -178,13 +178,16 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
 	}
 	if (!pmd_none(orig)) {
-		bool exec = ((pmd_val(orig) & PMD_HUGE_EXEC) != 0);
+		pte_t orig_pte = __pte(pmd_val(orig));
+		bool exec = pte_exec(orig_pte);
 		addr &= HPAGE_MASK;
-		if (pmd_val(orig) & PMD_ISHUGE)
+		if (pmd_trans_huge(orig)) {
 			tlb_batch_add_one(mm, addr, exec);
-		else
+			tlb_batch_add_one(mm, addr + REAL_HPAGE_SIZE, exec);
+		} else {
 			tlb_batch_pmd_scan(mm, addr, orig, exec);
+		}
 	}
 }
...
@@ -87,7 +87,7 @@ void flush_tsb_user(struct tlb_batch *tb)
 		nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
 		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
 			base = __pa(base);
-		__flush_tsb_one(tb, HPAGE_SHIFT, base, nentries);
+		__flush_tsb_one(tb, REAL_HPAGE_SHIFT, base, nentries);
 	}
 #endif
 	spin_unlock_irqrestore(&mm->context.lock, flags);
@@ -111,7 +111,7 @@ void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr)
 		nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
 		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
 			base = __pa(base);
-		__flush_tsb_one_entry(base, vaddr, HPAGE_SHIFT, nentries);
+		__flush_tsb_one_entry(base, vaddr, REAL_HPAGE_SHIFT, nentries);
 	}
 #endif
 	spin_unlock_irqrestore(&mm->context.lock, flags);
@@ -472,8 +472,6 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 	mm->context.huge_pte_count = 0;
 #endif
-	mm->context.pgtable_page = NULL;
 	/* copy_mm() copies over the parent's mm_struct before calling
 	 * us, so we need to zero out the TSB pointer or else tsb_grow()
 	 * will be confused and think there is an older TSB to free up.
@@ -512,17 +510,10 @@ static void tsb_destroy_one(struct tsb_config *tp)
 void destroy_context(struct mm_struct *mm)
 {
 	unsigned long flags, i;
-	struct page *page;
 	for (i = 0; i < MM_NUM_TSBS; i++)
 		tsb_destroy_one(&mm->context.tsb_block[i]);
-	page = mm->context.pgtable_page;
-	if (page && put_page_testzero(page)) {
-		pgtable_page_dtor(page);
-		free_hot_cold_page(page, 0);
-	}
 	spin_lock_irqsave(&ctx_alloc_lock, flags);
 	if (CTX_VALID(mm->context)) {
...
@@ -153,10 +153,10 @@ __spitfire_flush_tlb_mm_slow:
 	.globl	__flush_icache_page
 __flush_icache_page:	/* %o0 = phys_page */
 	srlx	%o0, PAGE_SHIFT, %o0
-	sethi	%uhi(PAGE_OFFSET), %g1
+	sethi	%hi(PAGE_OFFSET), %g1
 	sllx	%o0, PAGE_SHIFT, %o0
 	sethi	%hi(PAGE_SIZE), %g2
-	sllx	%g1, 32, %g1
+	ldx	[%g1 + %lo(PAGE_OFFSET)], %g1
 	add	%o0, %g1, %o0
 1:	subcc	%g2, 32, %g2
 	bne,pt	%icc, 1b
@@ -178,8 +178,8 @@ __flush_icache_page:	/* %o0 = phys_page */
 	.align	64
 	.globl	__flush_dcache_page
 __flush_dcache_page:	/* %o0=kaddr, %o1=flush_icache */
-	sethi	%uhi(PAGE_OFFSET), %g1
-	sllx	%g1, 32, %g1
+	sethi	%hi(PAGE_OFFSET), %g1
+	ldx	[%g1 + %lo(PAGE_OFFSET)], %g1
 	sub	%o0, %g1, %o0			! physical address
 	srlx	%o0, 11, %o0			! make D-cache TAG
 	sethi	%hi(1 << 14), %o2		! D-cache size
@@ -287,8 +287,8 @@ __cheetah_flush_tlb_pending:	/* 27 insns */
 #ifdef DCACHE_ALIASING_POSSIBLE
 __cheetah_flush_dcache_page:	/* 11 insns */
-	sethi	%uhi(PAGE_OFFSET), %g1
-	sllx	%g1, 32, %g1
+	sethi	%hi(PAGE_OFFSET), %g1
+	ldx	[%g1 + %lo(PAGE_OFFSET)], %g1
 	sub	%o0, %g1, %o0
 	sethi	%hi(PAGE_SIZE), %o4
 1:	subcc	%o4, (1 << 5), %o4
...