Commit e7bd807e authored by Linus Torvalds

Merge tag 'm68k-for-v5.15-tag3' of git://git.kernel.org/pub/scm/linux/kernel/git/geert/linux-m68k

Pull more m68k updates from Geert Uytterhoeven:

 - signal handling fixes

 - removal of set_fs()

[ The set_fs removal isn't strictly a fix, but it's been pending for a
  while and is very welcome. The signal handling fixes resolved an issue
  that was incorrectly attributed to the set_fs changes    - Linus ]

* tag 'm68k-for-v5.15-tag3' of git://git.kernel.org/pub/scm/linux/kernel/git/geert/linux-m68k:
  m68k: Remove set_fs()
  m68k: Provide __{get,put}_kernel_nofault
  m68k: Factor the 8-byte lowlevel {get,put}_user code into helpers
  m68k: Use BUILD_BUG for passing invalid sizes to get_user/put_user
  m68k: Remove the 030 case in virt_to_phys_slow
  m68k: Document that access_ok is broken for !CONFIG_CPU_HAS_ADDRESS_SPACES
  m68k: Leave stack mangling to asm wrapper of sigreturn()
  m68k: Update ->thread.esp0 before calling syscall_trace() in ret_from_signal
  m68k: Handle arrivals of multiple signals correctly
parents dca50f08 9fde0348
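
The core of the set_fs() removal is visible in the cache and TLB hunks below: code that used to widen the uaccess window with get_fs()/set_fs(KERNEL_DS) around a supervisor-space MMU or cache instruction now just switches the 680x0 SFC/DFC function-code registers through the new set_fc() helper, and the per-thread address limit disappears entirely. A rough C sketch of the new shape, modelled on the flush_icache_range() hunk further down; the wrapper name is made up for illustration and the include comments are assumptions, not part of the patch:

    #include <asm/processor.h>    /* set_fc(), SUPER_DATA, USER_DATA (added by this series) */
    #include <asm/cacheflush.h>   /* assumed to declare flush_icache_user_range() */

    /* Hypothetical wrapper: bracket a supervisor-space operation with set_fc()
     * instead of the old get_fs()/set_fs(KERNEL_DS)/set_fs(old_fs) dance. */
    static void flush_icache_kernel_range_sketch(unsigned long start, unsigned long end)
    {
    	set_fc(SUPER_DATA);                   /* SFC/DFC -> supervisor data space */
    	flush_icache_user_range(start, end);  /* same low-level worker as before */
    	set_fc(USER_DATA);                    /* restore the uaccess default */
    }

The default stays USER_DATA, so get_user()/put_user() keep using MOVES into user space without any per-thread address-limit state to save and restore.
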
@@ -15,7 +15,6 @@
 #include <asm/unistd.h>
 #include <asm/errno.h>
 #include <asm/setup.h>
-#include <asm/segment.h>
 #include <asm/traps.h>
 #include <asm/asm-offsets.h>
 #include <asm/entry.h>
@@ -25,7 +24,6 @@
 .globl system_call
 .globl resume
 .globl ret_from_exception
-.globl ret_from_signal
 .globl sys_call_table
 .globl bad_interrupt
 .globl inthandler1
@@ -59,8 +57,6 @@ do_trace:
 	subql	#4,%sp		/* dummy return address */
 	SAVE_SWITCH_STACK
 	jbsr	syscall_trace_leave
-
-ret_from_signal:
 	RESTORE_SWITCH_STACK
 	addql	#4,%sp
 	jra	ret_from_exception
...
@@ -29,7 +29,6 @@ config M68K
 	select NO_DMA if !MMU && !COLDFIRE
 	select OLD_SIGACTION
 	select OLD_SIGSUSPEND3
-	select SET_FS
 	select UACCESS_MEMCPY if !MMU
 	select VIRT_TO_BUS
 	select ZONE_DMA
...
@@ -31,7 +31,6 @@
 #include <asm/thread_info.h>
 #include <asm/errno.h>
 #include <asm/setup.h>
-#include <asm/segment.h>
 #include <asm/asm-offsets.h>
 #include <asm/entry.h>
@@ -51,7 +50,6 @@ sw_usp:
 .globl system_call
 .globl resume
 .globl ret_from_exception
-.globl ret_from_signal
 .globl sys_call_table
 .globl inthandler
@@ -98,8 +96,6 @@ ENTRY(system_call)
 	subql	#4,%sp		/* dummy return address */
 	SAVE_SWITCH_STACK
 	jbsr	syscall_trace_leave
-
-ret_from_signal:
 	RESTORE_SWITCH_STACK
 	addql	#4,%sp
...
@@ -9,7 +9,6 @@
 #define __ASM_M68K_PROCESSOR_H
 #include <linux/thread_info.h>
-#include <asm/segment.h>
 #include <asm/fpu.h>
 #include <asm/ptrace.h>
@@ -75,11 +74,37 @@ static inline void wrusp(unsigned long usp)
 #define TASK_UNMAPPED_BASE	0
 #endif
 
+/* Address spaces (or Function Codes in Motorola lingo) */
+#define USER_DATA     1
+#define USER_PROGRAM  2
+#define SUPER_DATA    5
+#define SUPER_PROGRAM 6
+#define CPU_SPACE     7
+
+#ifdef CONFIG_CPU_HAS_ADDRESS_SPACES
+/*
+ * Set the SFC/DFC registers for special MM operations.  For most normal
+ * operation these remain set to USER_DATA for the uaccess routines.
+ */
+static inline void set_fc(unsigned long val)
+{
+	WARN_ON_ONCE(in_interrupt());
+
+	__asm__ __volatile__ ("movec %0,%/sfc\n\t"
+			      "movec %0,%/dfc\n\t"
+			      : /* no outputs */ : "r" (val) : "memory");
+}
+#else
+static inline void set_fc(unsigned long val)
+{
+}
+#endif /* CONFIG_CPU_HAS_ADDRESS_SPACES */
+
 struct thread_struct {
 	unsigned long  ksp;	/* kernel stack pointer */
 	unsigned long  usp;	/* user stack pointer */
 	unsigned short sr;	/* saved status register */
-	unsigned short fs;	/* saved fs (sfc, dfc) */
+	unsigned short fc;	/* saved fc (sfc, dfc) */
 	unsigned long  crp[2];	/* cpu root pointer */
 	unsigned long  esp0;	/* points to SR of stack frame */
 	unsigned long  faddr;	/* info about last fault */
@@ -92,7 +117,7 @@ struct thread_struct {
 #define INIT_THREAD  { \
 	.ksp	= sizeof(init_stack) + (unsigned long) init_stack, \
 	.sr	= PS_S, \
-	.fs	= __KERNEL_DS, \
+	.fc	= USER_DATA, \
 }
 
 /*
...
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _M68K_SEGMENT_H
-#define _M68K_SEGMENT_H
-
-/* define constants */
-/* Address spaces (FC0-FC2) */
-#define USER_DATA     (1)
-#ifndef __USER_DS
-#define __USER_DS     (USER_DATA)
-#endif
-#define USER_PROGRAM  (2)
-#define SUPER_DATA    (5)
-#ifndef __KERNEL_DS
-#define __KERNEL_DS   (SUPER_DATA)
-#endif
-#define SUPER_PROGRAM (6)
-#define CPU_SPACE     (7)
-
-#ifndef __ASSEMBLY__
-
-typedef struct {
-	unsigned long seg;
-} mm_segment_t;
-
-#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })
-
-#ifdef CONFIG_CPU_HAS_ADDRESS_SPACES
-/*
- * Get/set the SFC/DFC registers for MOVES instructions
- */
-#define USER_DS		MAKE_MM_SEG(__USER_DS)
-#define KERNEL_DS	MAKE_MM_SEG(__KERNEL_DS)
-
-static inline mm_segment_t get_fs(void)
-{
-	mm_segment_t _v;
-
-	__asm__ ("movec %/dfc,%0":"=r" (_v.seg):);
-	return _v;
-}
-
-static inline void set_fs(mm_segment_t val)
-{
-	__asm__ __volatile__ ("movec %0,%/sfc\n\t"
-			      "movec %0,%/dfc\n\t"
-			      : /* no outputs */ : "r" (val.seg) : "memory");
-}
-
-#else
-#define USER_DS		MAKE_MM_SEG(TASK_SIZE)
-#define KERNEL_DS	MAKE_MM_SEG(0xFFFFFFFF)
-#define get_fs()	(current_thread_info()->addr_limit)
-#define set_fs(x)	(current_thread_info()->addr_limit = (x))
-#endif
-
-#define uaccess_kernel()	(get_fs().seg == KERNEL_DS.seg)
-
-#endif /* __ASSEMBLY__ */
-#endif /* _M68K_SEGMENT_H */
@@ -4,7 +4,6 @@
 #include <asm/types.h>
 #include <asm/page.h>
-#include <asm/segment.h>
 
 /*
  * On machines with 4k pages we default to an 8k thread size, though we
@@ -27,7 +26,6 @@
 struct thread_info {
 	struct task_struct	*task;		/* main task structure */
 	unsigned long		flags;
-	mm_segment_t		addr_limit;	/* thread address space */
 	int			preempt_count;	/* 0 => preemptable, <0 => BUG */
 	__u32			cpu;		/* should always be 0 on m68k */
 	unsigned long		tp_value;	/* thread pointer */
@@ -37,7 +35,6 @@ struct thread_info {
 #define INIT_THREAD_INFO(tsk)	\
 {				\
 	.task		= &tsk,			\
-	.addr_limit	= KERNEL_DS,		\
 	.preempt_count	= INIT_PREEMPT_COUNT,	\
 }
...
@@ -13,13 +13,12 @@ static inline void flush_tlb_kernel_page(void *addr)
 	if (CPU_IS_COLDFIRE) {
 		mmu_write(MMUOR, MMUOR_CNL);
 	} else if (CPU_IS_040_OR_060) {
-		mm_segment_t old_fs = get_fs();
-
-		set_fs(KERNEL_DS);
+		set_fc(SUPER_DATA);
 		__asm__ __volatile__(".chip 68040\n\t"
				     "pflush (%0)\n\t"
				     ".chip 68k"
				     : : "a" (addr));
-		set_fs(old_fs);
+		set_fc(USER_DATA);
 	} else if (CPU_IS_020_OR_030)
 		__asm__ __volatile__("pflush #4,#4,(%0)" : : "a" (addr));
 }
@@ -84,12 +83,8 @@ static inline void flush_tlb_mm(struct mm_struct *mm)
 static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
 {
-	if (vma->vm_mm == current->active_mm) {
-		mm_segment_t old_fs = force_uaccess_begin();
-
+	if (vma->vm_mm == current->active_mm)
 		__flush_tlb_one(addr);
-		force_uaccess_end(old_fs);
-	}
 }
 
 static inline void flush_tlb_range(struct vm_area_struct *vma,
...
@@ -267,6 +267,10 @@ struct frame {
 	} un;
 };
 
+#ifdef CONFIG_M68040
+asmlinkage void berr_040cleanup(struct frame *fp);
+#endif
+
 #endif /* __ASSEMBLY__ */
 #endif /* _M68K_TRAPS_H */
@@ -9,13 +9,16 @@
  */
 #include <linux/compiler.h>
 #include <linux/types.h>
-#include <asm/segment.h>
 #include <asm/extable.h>
 
 /* We let the MMU do all checking */
 static inline int access_ok(const void __user *addr,
			    unsigned long size)
 {
+	/*
+	 * XXX: for !CONFIG_CPU_HAS_ADDRESS_SPACES this really needs to check
+	 * for TASK_SIZE!
+	 */
 	return 1;
 }
@@ -35,12 +38,9 @@ static inline int access_ok(const void __user *addr,
 #define MOVES	"move"
 #endif
 
-extern int __put_user_bad(void);
-extern int __get_user_bad(void);
-
-#define __put_user_asm(res, x, ptr, bwl, reg, err) \
+#define __put_user_asm(inst, res, x, ptr, bwl, reg, err) \
 asm volatile ("\n" \
-	"1: "MOVES"."#bwl" %2,%1\n" \
+	"1: "inst"."#bwl" %2,%1\n" \
	"2:\n" \
	" .section .fixup,\"ax\"\n" \
	" .even\n" \
@@ -56,6 +56,31 @@ asm volatile ("\n" \
	: "+d" (res), "=m" (*(ptr)) \
	: #reg (x), "i" (err))
 
+#define __put_user_asm8(inst, res, x, ptr) \
+do { \
+	const void *__pu_ptr = (const void __force *)(ptr); \
+ \
+	asm volatile ("\n" \
+		"1: "inst".l %2,(%1)+\n" \
+		"2: "inst".l %R2,(%1)\n" \
+		"3:\n" \
+		" .section .fixup,\"ax\"\n" \
+		" .even\n" \
+		"10: movel %3,%0\n" \
+		" jra 3b\n" \
+		" .previous\n" \
+		"\n" \
+		" .section __ex_table,\"a\"\n" \
+		" .align 4\n" \
+		" .long 1b,10b\n" \
+		" .long 2b,10b\n" \
+		" .long 3b,10b\n" \
+		" .previous" \
+		: "+d" (res), "+a" (__pu_ptr) \
+		: "r" (x), "i" (-EFAULT) \
+		: "memory"); \
+} while (0)
+
 /*
  * These are the main single-value transfer routines.  They automatically
  * use the right size if we just have the right pointer type.
@@ -68,51 +93,29 @@ asm volatile ("\n" \
	__chk_user_ptr(ptr); \
	switch (sizeof (*(ptr))) { \
	case 1: \
-		__put_user_asm(__pu_err, __pu_val, ptr, b, d, -EFAULT); \
+		__put_user_asm(MOVES, __pu_err, __pu_val, ptr, b, d, -EFAULT); \
		break; \
	case 2: \
-		__put_user_asm(__pu_err, __pu_val, ptr, w, r, -EFAULT); \
+		__put_user_asm(MOVES, __pu_err, __pu_val, ptr, w, r, -EFAULT); \
		break; \
	case 4: \
-		__put_user_asm(__pu_err, __pu_val, ptr, l, r, -EFAULT); \
+		__put_user_asm(MOVES, __pu_err, __pu_val, ptr, l, r, -EFAULT); \
		break; \
	case 8: \
-	    { \
-		const void __user *__pu_ptr = (ptr); \
-		asm volatile ("\n" \
-			"1: "MOVES".l %2,(%1)+\n" \
-			"2: "MOVES".l %R2,(%1)\n" \
-			"3:\n" \
-			" .section .fixup,\"ax\"\n" \
-			" .even\n" \
-			"10: movel %3,%0\n" \
-			" jra 3b\n" \
-			" .previous\n" \
-			"\n" \
-			" .section __ex_table,\"a\"\n" \
-			" .align 4\n" \
-			" .long 1b,10b\n" \
-			" .long 2b,10b\n" \
-			" .long 3b,10b\n" \
-			" .previous" \
-			: "+d" (__pu_err), "+a" (__pu_ptr) \
-			: "r" (__pu_val), "i" (-EFAULT) \
-			: "memory"); \
+		__put_user_asm8(MOVES, __pu_err, __pu_val, ptr); \
		break; \
-	    } \
	default: \
-		__pu_err = __put_user_bad(); \
-		break; \
+		BUILD_BUG(); \
	} \
	__pu_err; \
 })
 
 #define put_user(x, ptr)	__put_user(x, ptr)
 
-#define __get_user_asm(res, x, ptr, type, bwl, reg, err) ({ \
+#define __get_user_asm(inst, res, x, ptr, type, bwl, reg, err) ({ \
	type __gu_val; \
	asm volatile ("\n" \
-		"1: "MOVES"."#bwl" %2,%1\n" \
+		"1: "inst"."#bwl" %2,%1\n" \
		"2:\n" \
		" .section .fixup,\"ax\"\n" \
		" .even\n" \
@@ -130,53 +133,57 @@ asm volatile ("\n" \
	(x) = (__force typeof(*(ptr)))(__force unsigned long)__gu_val; \
 })
 
+#define __get_user_asm8(inst, res, x, ptr) \
+do { \
+	const void *__gu_ptr = (const void __force *)(ptr); \
+	union { \
+		u64 l; \
+		__typeof__(*(ptr)) t; \
+	} __gu_val; \
+ \
+	asm volatile ("\n" \
+		"1: "inst".l (%2)+,%1\n" \
+		"2: "inst".l (%2),%R1\n" \
+		"3:\n" \
+		" .section .fixup,\"ax\"\n" \
+		" .even\n" \
+		"10: move.l %3,%0\n" \
+		" sub.l %1,%1\n" \
+		" sub.l %R1,%R1\n" \
+		" jra 3b\n" \
+		" .previous\n" \
+		"\n" \
+		" .section __ex_table,\"a\"\n" \
+		" .align 4\n" \
+		" .long 1b,10b\n" \
+		" .long 2b,10b\n" \
+		" .previous" \
+		: "+d" (res), "=&r" (__gu_val.l), \
+		  "+a" (__gu_ptr) \
+		: "i" (-EFAULT) \
+		: "memory"); \
+	(x) = __gu_val.t; \
+} while (0)
+
 #define __get_user(x, ptr) \
 ({ \
	int __gu_err = 0; \
	__chk_user_ptr(ptr); \
	switch (sizeof(*(ptr))) { \
	case 1: \
-		__get_user_asm(__gu_err, x, ptr, u8, b, d, -EFAULT); \
+		__get_user_asm(MOVES, __gu_err, x, ptr, u8, b, d, -EFAULT); \
		break; \
	case 2: \
-		__get_user_asm(__gu_err, x, ptr, u16, w, r, -EFAULT); \
+		__get_user_asm(MOVES, __gu_err, x, ptr, u16, w, r, -EFAULT); \
		break; \
	case 4: \
-		__get_user_asm(__gu_err, x, ptr, u32, l, r, -EFAULT); \
+		__get_user_asm(MOVES, __gu_err, x, ptr, u32, l, r, -EFAULT); \
		break; \
-	case 8: { \
-		const void __user *__gu_ptr = (ptr); \
-		union { \
-			u64 l; \
-			__typeof__(*(ptr)) t; \
-		} __gu_val; \
-		asm volatile ("\n" \
-			"1: "MOVES".l (%2)+,%1\n" \
-			"2: "MOVES".l (%2),%R1\n" \
-			"3:\n" \
-			" .section .fixup,\"ax\"\n" \
-			" .even\n" \
-			"10: move.l %3,%0\n" \
-			" sub.l %1,%1\n" \
-			" sub.l %R1,%R1\n" \
-			" jra 3b\n" \
-			" .previous\n" \
-			"\n" \
-			" .section __ex_table,\"a\"\n" \
-			" .align 4\n" \
-			" .long 1b,10b\n" \
-			" .long 2b,10b\n" \
-			" .previous" \
-			: "+d" (__gu_err), "=&r" (__gu_val.l), \
-			  "+a" (__gu_ptr) \
-			: "i" (-EFAULT) \
-			: "memory"); \
-		(x) = __gu_val.t; \
+	case 8: \
+		__get_user_asm8(MOVES, __gu_err, x, ptr); \
		break; \
-	} \
	default: \
-		__gu_err = __get_user_bad(); \
-		break; \
+		BUILD_BUG(); \
	} \
	__gu_err; \
 })
@@ -322,16 +329,19 @@ __constant_copy_to_user(void __user *to, const void *from, unsigned long n)
	switch (n) {
	case 1:
-		__put_user_asm(res, *(u8 *)from, (u8 __user *)to, b, d, 1);
+		__put_user_asm(MOVES, res, *(u8 *)from, (u8 __user *)to,
+				b, d, 1);
		break;
	case 2:
-		__put_user_asm(res, *(u16 *)from, (u16 __user *)to, w, r, 2);
+		__put_user_asm(MOVES, res, *(u16 *)from, (u16 __user *)to,
+				w, r, 2);
		break;
	case 3:
		__constant_copy_to_user_asm(res, to, from, tmp, 3, w, b,);
		break;
	case 4:
-		__put_user_asm(res, *(u32 *)from, (u32 __user *)to, l, r, 4);
+		__put_user_asm(MOVES, res, *(u32 *)from, (u32 __user *)to,
+				l, r, 4);
		break;
	case 5:
		__constant_copy_to_user_asm(res, to, from, tmp, 5, l, b,);
@@ -380,8 +390,65 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n)
 #define INLINE_COPY_FROM_USER
 #define INLINE_COPY_TO_USER
 
-#define user_addr_max() \
-	(uaccess_kernel() ? ~0UL : TASK_SIZE)
+#define HAVE_GET_KERNEL_NOFAULT
+
+#define __get_kernel_nofault(dst, src, type, err_label) \
+do { \
+	type *__gk_dst = (type *)(dst); \
+	type *__gk_src = (type *)(src); \
+	int __gk_err = 0; \
+ \
+	switch (sizeof(type)) { \
+	case 1: \
+		__get_user_asm("move", __gk_err, *__gk_dst, __gk_src, \
+				u8, b, d, -EFAULT); \
+		break; \
+	case 2: \
+		__get_user_asm("move", __gk_err, *__gk_dst, __gk_src, \
+				u16, w, r, -EFAULT); \
+		break; \
+	case 4: \
+		__get_user_asm("move", __gk_err, *__gk_dst, __gk_src, \
+				u32, l, r, -EFAULT); \
+		break; \
+	case 8: \
+		__get_user_asm8("move", __gk_err, *__gk_dst, __gk_src); \
+		break; \
+	default: \
+		BUILD_BUG(); \
+	} \
+	if (unlikely(__gk_err)) \
+		goto err_label; \
+} while (0)
+
+#define __put_kernel_nofault(dst, src, type, err_label) \
+do { \
+	type __pk_src = *(type *)(src); \
+	type *__pk_dst = (type *)(dst); \
+	int __pk_err = 0; \
+ \
+	switch (sizeof(type)) { \
+	case 1: \
+		__put_user_asm("move", __pk_err, __pk_src, __pk_dst, \
+				b, d, -EFAULT); \
+		break; \
+	case 2: \
+		__put_user_asm("move", __pk_err, __pk_src, __pk_dst, \
+				w, r, -EFAULT); \
+		break; \
+	case 4: \
+		__put_user_asm("move", __pk_err, __pk_src, __pk_dst, \
+				l, r, -EFAULT); \
+		break; \
+	case 8: \
+		__put_user_asm8("move", __pk_err, __pk_src, __pk_dst); \
+		break; \
+	default: \
+		BUILD_BUG(); \
+	} \
+	if (unlikely(__pk_err)) \
+		goto err_label; \
+} while (0)
 
 extern long strncpy_from_user(char *dst, const char __user *src, long count);
 extern __must_check long strnlen_user(const char __user *str, long n);
...
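
With HAVE_GET_KERNEL_NOFAULT defined, the generic maccess helpers in the core kernel (copy_from_kernel_nofault() and friends) expand to these __{get,put}_kernel_nofault() macros instead of temporarily switching the address limit to KERNEL_DS. A minimal usage sketch; the wrapper below is purely illustrative and not part of the patch, and it assumes the usual copy_from_kernel_nofault() declaration from <linux/uaccess.h>:

    #include <linux/uaccess.h>	/* copy_from_kernel_nofault() */

    /* Probe a kernel address that may be unmapped: returns 0 on success or a
     * negative error if the access faulted, without oopsing. */
    static long peek_kernel_word(const unsigned long *addr, unsigned long *val)
    {
    	return copy_from_kernel_nofault(val, addr, sizeof(*val));
    }
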
@@ -31,7 +31,7 @@ int main(void)
 	DEFINE(THREAD_KSP, offsetof(struct thread_struct, ksp));
 	DEFINE(THREAD_USP, offsetof(struct thread_struct, usp));
 	DEFINE(THREAD_SR, offsetof(struct thread_struct, sr));
-	DEFINE(THREAD_FS, offsetof(struct thread_struct, fs));
+	DEFINE(THREAD_FC, offsetof(struct thread_struct, fc));
 	DEFINE(THREAD_CRP, offsetof(struct thread_struct, crp));
 	DEFINE(THREAD_ESP0, offsetof(struct thread_struct, esp0));
 	DEFINE(THREAD_FPREG, offsetof(struct thread_struct, fp));
...
@@ -36,7 +36,6 @@
 #include <linux/linkage.h>
 #include <asm/errno.h>
 #include <asm/setup.h>
-#include <asm/segment.h>
 #include <asm/traps.h>
 #include <asm/unistd.h>
 #include <asm/asm-offsets.h>
@@ -78,20 +77,38 @@ ENTRY(__sys_clone3)
 ENTRY(sys_sigreturn)
 	SAVE_SWITCH_STACK
-	movel	%sp,%sp@-		  | switch_stack pointer
-	pea	%sp@(SWITCH_STACK_SIZE+4) | pt_regs pointer
+	movel	%sp,%a1				| switch_stack pointer
+	lea	%sp@(SWITCH_STACK_SIZE),%a0	| pt_regs pointer
+	lea	%sp@(-84),%sp			| leave a gap
+	movel	%a1,%sp@-
+	movel	%a0,%sp@-
 	jbsr	do_sigreturn
-	addql	#8,%sp
-	RESTORE_SWITCH_STACK
-	rts
+	jra	1f				| shared with rt_sigreturn()
 
 ENTRY(sys_rt_sigreturn)
 	SAVE_SWITCH_STACK
-	movel	%sp,%sp@-		  | switch_stack pointer
-	pea	%sp@(SWITCH_STACK_SIZE+4) | pt_regs pointer
+	movel	%sp,%a1				| switch_stack pointer
+	lea	%sp@(SWITCH_STACK_SIZE),%a0	| pt_regs pointer
+	lea	%sp@(-84),%sp			| leave a gap
+	movel	%a1,%sp@-
+	movel	%a0,%sp@-
+	| stack contents:
+	|   [original pt_regs address] [original switch_stack address]
+	|   [gap] [switch_stack] [pt_regs] [exception frame]
 	jbsr	do_rt_sigreturn
-	addql	#8,%sp
+
+1:
+	| stack contents now:
+	|   [original pt_regs address] [original switch_stack address]
+	|   [unused part of the gap] [moved switch_stack] [moved pt_regs]
+	|   [replacement exception frame]
+	| return value of do_{rt_,}sigreturn() points to moved switch_stack.
+
+	movel	%d0,%sp		| discard the leftover junk
 	RESTORE_SWITCH_STACK
+	| stack contents now is just [syscall return address] [pt_regs] [frame]
+	| return pt_regs.d0
+	movel	%sp@(PT_OFF_D0+4),%d0
 	rts
 
 ENTRY(buserr)
@@ -182,25 +199,6 @@ do_trace_exit:
 	addql	#4,%sp
 	jra	.Lret_from_exception
 
-ENTRY(ret_from_signal)
-	movel	%curptr@(TASK_STACK),%a1
-	tstb	%a1@(TINFO_FLAGS+2)
-	jge	1f
-	jbsr	syscall_trace
-1:	RESTORE_SWITCH_STACK
-	addql	#4,%sp
-/* on 68040 complete pending writebacks if any */
-#ifdef CONFIG_M68040
-	bfextu	%sp@(PT_OFF_FORMATVEC){#0,#4},%d0
-	subql	#7,%d0				| bus error frame ?
-	jbne	1f
-	movel	%sp,%sp@-
-	jbsr	berr_040cleanup
-	addql	#4,%sp
-1:
-#endif
-	jra	.Lret_from_exception
-
 ENTRY(system_call)
 	SAVE_ALL_SYS
@@ -338,7 +336,7 @@ resume:
 	/* save fs (sfc,%dfc) (may be pointing to kernel memory) */
 	movec	%sfc,%d0
-	movew	%d0,%a0@(TASK_THREAD+THREAD_FS)
+	movew	%d0,%a0@(TASK_THREAD+THREAD_FC)
 
 	/* save usp */
 	/* it is better to use a movel here instead of a movew 8*) */
@@ -424,7 +422,7 @@ resume:
 	movel	%a0,%usp
 
 	/* restore fs (sfc,%dfc) */
-	movew	%a1@(TASK_THREAD+THREAD_FS),%a0
+	movew	%a1@(TASK_THREAD+THREAD_FC),%a0
 	movec	%a0,%sfc
 	movec	%a0,%dfc
...
@@ -92,7 +92,7 @@ void show_regs(struct pt_regs * regs)
 void flush_thread(void)
 {
-	current->thread.fs = __USER_DS;
+	current->thread.fc = USER_DATA;
 #ifdef CONFIG_FPU
 	if (!FPU_IS_EMU) {
 		unsigned long zero = 0;
@@ -155,7 +155,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg,
 	 * Must save the current SFC/DFC value, NOT the value when
 	 * the parent was last descheduled - RGH 10-08-96
 	 */
-	p->thread.fs = get_fs().seg;
+	p->thread.fc = USER_DATA;
 	if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
 		/* kernel thread */
...
This diff is collapsed.
@@ -181,9 +181,8 @@ static inline void access_error060 (struct frame *fp)
 static inline unsigned long probe040(int iswrite, unsigned long addr, int wbs)
 {
 	unsigned long mmusr;
-	mm_segment_t old_fs = get_fs();
 
-	set_fs(MAKE_MM_SEG(wbs));
+	set_fc(wbs);
 
 	if (iswrite)
 		asm volatile (".chip 68040; ptestw (%0); .chip 68k" : : "a" (addr));
@@ -192,7 +191,7 @@ static inline unsigned long probe040(int iswrite, unsigned long addr, int wbs)
 	asm volatile (".chip 68040; movec %%mmusr,%0; .chip 68k" : "=r" (mmusr));
 
-	set_fs(old_fs);
+	set_fc(USER_DATA);
 
 	return mmusr;
 }
@@ -201,10 +200,8 @@ static inline int do_040writeback1(unsigned short wbs, unsigned long wba,
				   unsigned long wbd)
 {
 	int res = 0;
-	mm_segment_t old_fs = get_fs();
 
-	/* set_fs can not be moved, otherwise put_user() may oops */
-	set_fs(MAKE_MM_SEG(wbs));
+	set_fc(wbs);
 
 	switch (wbs & WBSIZ_040) {
 	case BA_SIZE_BYTE:
@@ -218,9 +215,7 @@ static inline int do_040writeback1(unsigned short wbs, unsigned long wba,
 		break;
 	}
 
-	/* set_fs can not be moved, otherwise put_user() may oops */
-	set_fs(old_fs);
+	set_fc(USER_DATA);
 
 	pr_debug("do_040writeback1, res=%d\n", res);
...
@@ -18,7 +18,6 @@
 #include <linux/uaccess.h>
 
 #include <asm/io.h>
-#include <asm/segment.h>
 #include <asm/setup.h>
 #include <asm/macintosh.h>
 #include <asm/mac_via.h>
...
@@ -49,24 +49,7 @@ static unsigned long virt_to_phys_slow(unsigned long vaddr)
 		if (mmusr & MMU_R_040)
 			return (mmusr & PAGE_MASK) | (vaddr & ~PAGE_MASK);
 	} else {
-		unsigned short mmusr;
-		unsigned long *descaddr;
-
-		asm volatile ("ptestr %3,%2@,#7,%0\n\t"
-			      "pmove %%psr,%1"
-			      : "=a&" (descaddr), "=m" (mmusr)
-			      : "a" (vaddr), "d" (get_fs().seg));
-		if (mmusr & (MMU_I|MMU_B|MMU_L))
-			return 0;
-		descaddr = phys_to_virt((unsigned long)descaddr);
-		switch (mmusr & MMU_NUM) {
-		case 1:
-			return (*descaddr & 0xfe000000) | (vaddr & 0x01ffffff);
-		case 2:
-			return (*descaddr & 0xfffc0000) | (vaddr & 0x0003ffff);
-		case 3:
-			return (*descaddr & PAGE_MASK) | (vaddr & ~PAGE_MASK);
-		}
+		WARN_ON_ONCE(!CPU_IS_040_OR_060);
 	}
 	return 0;
 }
@@ -107,11 +90,9 @@ void flush_icache_user_range(unsigned long address, unsigned long endaddr)
 void flush_icache_range(unsigned long address, unsigned long endaddr)
 {
-	mm_segment_t old_fs = get_fs();
-
-	set_fs(KERNEL_DS);
+	set_fc(SUPER_DATA);
 	flush_icache_user_range(address, endaddr);
-	set_fs(old_fs);
+	set_fc(USER_DATA);
 }
 EXPORT_SYMBOL(flush_icache_range);
...
@@ -72,12 +72,6 @@ void __init paging_init(void)
 	if (!empty_zero_page)
 		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE);
 
-	/*
-	 * Set up SFC/DFC registers (user data space).
-	 */
-	set_fs (USER_DS);
-
 	max_zone_pfn[ZONE_DMA] = end_mem >> PAGE_SHIFT;
 	free_area_init(max_zone_pfn);
 }
...
@@ -17,7 +17,6 @@
 #include <linux/vmalloc.h>
 
 #include <asm/setup.h>
-#include <asm/segment.h>
 #include <asm/page.h>
 #include <asm/io.h>
 #include <asm/tlbflush.h>
...
@@ -15,7 +15,6 @@
 #include <linux/gfp.h>
 
 #include <asm/setup.h>
-#include <asm/segment.h>
 #include <asm/page.h>
 #include <asm/traps.h>
 #include <asm/machdep.h>
...
@@ -467,7 +467,7 @@ void __init paging_init(void)
 	/*
 	 * Set up SFC/DFC registers
 	 */
-	set_fs(KERNEL_DS);
+	set_fc(USER_DATA);
 
 #ifdef DEBUG
 	printk ("before free_area_init\n");
...
@@ -31,7 +31,6 @@
 #include <asm/intersil.h>
 #include <asm/irq.h>
 #include <asm/sections.h>
-#include <asm/segment.h>
 #include <asm/sun3ints.h>
 
 char sun3_reserved_pmeg[SUN3_PMEGS_NUM];
@@ -89,7 +88,7 @@ void __init sun3_init(void)
 	sun3_reserved_pmeg[249] = 1;
 	sun3_reserved_pmeg[252] = 1;
 	sun3_reserved_pmeg[253] = 1;
-	set_fs(KERNEL_DS);
+	set_fc(USER_DATA);
 }
 
 /* Without this, Bad Things happen when something calls arch_reset. */
...
@@ -23,7 +23,6 @@
 #include <linux/uaccess.h>
 #include <asm/page.h>
 #include <asm/sun3mmu.h>
-#include <asm/segment.h>
 #include <asm/oplib.h>
 #include <asm/mmu_context.h>
 #include <asm/dvma.h>
@@ -191,14 +190,13 @@ void __init mmu_emu_init(unsigned long bootmem_end)
 	for(seg = 0; seg < PAGE_OFFSET; seg += SUN3_PMEG_SIZE)
 		sun3_put_segmap(seg, SUN3_INVALID_PMEG);
 
-	set_fs(MAKE_MM_SEG(3));
+	set_fc(3);
 	for(seg = 0; seg < 0x10000000; seg += SUN3_PMEG_SIZE) {
 		i = sun3_get_segmap(seg);
 		for(j = 1; j < CONTEXTS_NUM; j++)
 			(*(romvec->pv_setctxt))(j, (void *)seg, i);
 	}
-	set_fs(KERNEL_DS);
+	set_fc(USER_DATA);
 }
 
 /* erase the mappings for a dead context. Uses the pg_dir for hints
...
@@ -11,7 +11,6 @@
 #include <linux/sched.h>
 #include <linux/kernel_stat.h>
 #include <linux/interrupt.h>
-#include <asm/segment.h>
 #include <asm/intersil.h>
 #include <asm/oplib.h>
 #include <asm/sun3ints.h>
...
@@ -14,7 +14,6 @@
 #include <asm/traps.h>
 #include <asm/sun3xprom.h>
 #include <asm/idprom.h>
-#include <asm/segment.h>
 #include <asm/sun3ints.h>
 #include <asm/openprom.h>
 #include <asm/machines.h>
...