Commit 445c682b authored by Linus Torvalds

Merge branch 'for-linus' of git://git.monstr.eu/linux-2.6-microblaze

* 'for-linus' of git://git.monstr.eu/linux-2.6-microblaze: (35 commits)
  microblaze: Support word copying in copy_tofrom_user
  microblaze: Print early printk information to log buffer
  microblaze: head.S typo fix
  microblaze: Use MICROBLAZE_TLB_SIZE in asm code
  microblaze: Kconfig Fix - pci
  microblaze: Adding likely macros
  microblaze: Add .type and .size to ASM functions
  microblaze: Fix TLB macros
  microblaze: Use instruction with delay slot
  microblaze: Remove additional resr and rear loading
  microblaze: Change register usage for ESR and EAR
  microblaze: Prepare work for optimization in exception code
  microblaze: Add DEBUG option
  microblaze: Support systems without lmb bram
  microblaze: uaccess: Sync strlen, strnlen, copy_to/from_user
  microblaze: uaccess: Unify __copy_tofrom_user
  microblaze: uaccess: Move functions to generic location
  microblaze: uaccess: Fix put_user for noMMU
  microblaze: uaccess: Fix get_user macro for noMMU
  microblaze: uaccess: fix clear_user for noMMU kernel
  ...
parents c7681f46 ca3865ba
@@ -75,9 +75,6 @@ config LOCKDEP_SUPPORT
 config HAVE_LATENCYTOP_SUPPORT
 	def_bool y

-config PCI
-	def_bool n
-
 config DTC
 	def_bool y
...
@@ -84,7 +84,7 @@ define archhelp
  echo '* linux.bin - Create raw binary'
  echo ' linux.bin.gz - Create compressed raw binary'
  echo ' simpleImage.<dt> - ELF image with $(arch)/boot/dts/<dt>.dts linked in'
-  echo ' - stripped elf with fdt blob
+  echo ' - stripped elf with fdt blob'
  echo ' simpleImage.<dt>.unstrip - full ELF image with fdt blob'
  echo ' *_defconfig - Select default config from arch/microblaze/configs'
  echo ''
@@ -94,3 +94,5 @@ define archhelp
  echo ' name of a dts file from the arch/microblaze/boot/dts/ directory'
  echo ' (minus the .dts extension).'
 endef
+
+MRPROPER_FILES += $(boot)/simpleImage.*
@@ -23,8 +23,6 @@ $(obj)/system.dtb: $(obj)/$(DTB).dtb
 endif

 $(obj)/linux.bin: vmlinux FORCE
-	[ -n $(CONFIG_INITRAMFS_SOURCE) ] && [ ! -e $(CONFIG_INITRAMFS_SOURCE) ] && \
-	touch $(CONFIG_INITRAMFS_SOURCE) || echo "No CPIO image"
 	$(call if_changed,objcopy)
 	$(call if_changed,uimage)
 	@echo 'Kernel: $@ is ready' ' (#'`cat .version`')'
@@ -62,6 +60,4 @@ quiet_cmd_dtc = DTC $@
 $(obj)/%.dtb: $(dtstree)/%.dts FORCE
 	$(call if_changed,dtc)

-clean-kernel += linux.bin linux.bin.gz simpleImage.*
-clean-files += *.dtb simpleImage.*.unstrip
+clean-files += *.dtb simpleImage.*.unstrip linux.bin.ub
@@ -14,7 +14,6 @@
 #include <asm/ptrace.h>
 #include <asm/setup.h>
 #include <asm/registers.h>
-#include <asm/segment.h>
 #include <asm/entry.h>
 #include <asm/current.h>
...
/*
* Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
* Copyright (C) 2008-2009 PetaLogix
* Copyright (C) 2006 Atmark Techno, Inc.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#ifndef _ASM_MICROBLAZE_SEGMENT_H
#define _ASM_MICROBLAZE_SEGMENT_H
# ifndef __ASSEMBLY__
typedef struct {
unsigned long seg;
} mm_segment_t;
/*
* On Microblaze the fs value is actually the top of the corresponding
* address space.
*
* The fs value determines whether argument validity checking should be
* performed or not. If get_fs() == USER_DS, checking is performed, with
* get_fs() == KERNEL_DS, checking is bypassed.
*
* For historical reasons, these macros are grossly misnamed.
*
* For non-MMU arch like Microblaze, KERNEL_DS and USER_DS is equal.
*/
# define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
# ifndef CONFIG_MMU
# define KERNEL_DS MAKE_MM_SEG(0)
# define USER_DS KERNEL_DS
# else
# define KERNEL_DS MAKE_MM_SEG(0xFFFFFFFF)
# define USER_DS MAKE_MM_SEG(TASK_SIZE - 1)
# endif
# define get_ds() (KERNEL_DS)
# define get_fs() (current_thread_info()->addr_limit)
# define set_fs(val) (current_thread_info()->addr_limit = (val))
# define segment_eq(a, b) ((a).seg == (b).seg)
# endif /* __ASSEMBLY__ */
#endif /* _ASM_MICROBLAZE_SEGMENT_H */
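The addr_limit convention described in the comment above (which this series moves into uaccess.h) is easiest to see in the classic in-kernel pattern; a minimal sketch, illustrative only and not code from this patch:

	#include <linux/uaccess.h>	/* get_fs(), set_fs(), KERNEL_DS, mm_segment_t */

	/* Hypothetical helper, only to illustrate the addr_limit semantics:
	 * widening the limit to KERNEL_DS makes access_ok()/copy_*_user()
	 * accept kernel pointers; the caller must restore the old limit. */
	static void example_use_kernel_ds(void)
	{
		mm_segment_t old_fs = get_fs();	/* usually USER_DS here */

		set_fs(KERNEL_DS);		/* argument checking bypassed */
		/* ... call code that takes __user pointers on a kernel buffer ... */
		set_fs(old_fs);			/* put the limit back */
	}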
@@ -19,7 +19,6 @@
 #ifndef __ASSEMBLY__
 # include <linux/types.h>
 # include <asm/processor.h>
-# include <asm/segment.h>

 /*
  * low level task data that entry.S needs immediate access to
@@ -60,6 +59,10 @@ struct cpu_context {
 	__u32	fsr;
 };

+typedef struct {
+	unsigned long	seg;
+} mm_segment_t;
+
 struct thread_info {
 	struct task_struct	*task;		/* main task structure */
 	struct exec_domain	*exec_domain;	/* execution domain */
...
@@ -24,6 +24,7 @@ extern void _tlbie(unsigned long address);
 extern void _tlbia(void);

 #define __tlbia()	{ preempt_disable(); _tlbia(); preempt_enable(); }
+#define __tlbie(x)	{ _tlbie(x); }

 static inline void local_flush_tlb_all(void)
 	{ __tlbia(); }
@@ -31,7 +32,7 @@ static inline void local_flush_tlb_mm(struct mm_struct *mm)
 	{ __tlbia(); }
 static inline void local_flush_tlb_page(struct vm_area_struct *vma,
 				unsigned long vmaddr)
-	{ _tlbie(vmaddr); }
+	{ __tlbie(vmaddr); }
 static inline void local_flush_tlb_range(struct vm_area_struct *vma,
 		unsigned long start, unsigned long end)
 	{ __tlbia(); }
...
...@@ -22,101 +22,73 @@ ...@@ -22,101 +22,73 @@
#include <asm/mmu.h> #include <asm/mmu.h>
#include <asm/page.h> #include <asm/page.h>
#include <asm/pgtable.h> #include <asm/pgtable.h>
#include <asm/segment.h>
#include <linux/string.h> #include <linux/string.h>
#define VERIFY_READ 0 #define VERIFY_READ 0
#define VERIFY_WRITE 1 #define VERIFY_WRITE 1
#define __clear_user(addr, n) (memset((void *)(addr), 0, (n)), 0) /*
* On Microblaze the fs value is actually the top of the corresponding
#ifndef CONFIG_MMU * address space.
*
extern int ___range_ok(unsigned long addr, unsigned long size); * The fs value determines whether argument validity checking should be
* performed or not. If get_fs() == USER_DS, checking is performed, with
#define __range_ok(addr, size) \ * get_fs() == KERNEL_DS, checking is bypassed.
___range_ok((unsigned long)(addr), (unsigned long)(size)) *
* For historical reasons, these macros are grossly misnamed.
#define access_ok(type, addr, size) (__range_ok((addr), (size)) == 0) *
#define __access_ok(add, size) (__range_ok((addr), (size)) == 0) * For non-MMU arch like Microblaze, KERNEL_DS and USER_DS is equal.
*/
/* Undefined function to trigger linker error */ # define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
extern int bad_user_access_length(void);
/* FIXME this is function for optimalization -> memcpy */
#define __get_user(var, ptr) \
({ \
int __gu_err = 0; \
switch (sizeof(*(ptr))) { \
case 1: \
case 2: \
case 4: \
(var) = *(ptr); \
break; \
case 8: \
memcpy((void *) &(var), (ptr), 8); \
break; \
default: \
(var) = 0; \
__gu_err = __get_user_bad(); \
break; \
} \
__gu_err; \
})
#define __get_user_bad() (bad_user_access_length(), (-EFAULT)) # ifndef CONFIG_MMU
# define KERNEL_DS MAKE_MM_SEG(0)
# define USER_DS KERNEL_DS
# else
# define KERNEL_DS MAKE_MM_SEG(0xFFFFFFFF)
# define USER_DS MAKE_MM_SEG(TASK_SIZE - 1)
# endif
/* FIXME is not there defined __pu_val */ # define get_ds() (KERNEL_DS)
#define __put_user(var, ptr) \ # define get_fs() (current_thread_info()->addr_limit)
({ \ # define set_fs(val) (current_thread_info()->addr_limit = (val))
int __pu_err = 0; \
switch (sizeof(*(ptr))) { \
case 1: \
case 2: \
case 4: \
*(ptr) = (var); \
break; \
case 8: { \
typeof(*(ptr)) __pu_val = (var); \
memcpy(ptr, &__pu_val, sizeof(__pu_val)); \
} \
break; \
default: \
__pu_err = __put_user_bad(); \
break; \
} \
__pu_err; \
})
#define __put_user_bad() (bad_user_access_length(), (-EFAULT)) # define segment_eq(a, b) ((a).seg == (b).seg)
#define put_user(x, ptr) __put_user((x), (ptr)) /*
#define get_user(x, ptr) __get_user((x), (ptr)) * The exception table consists of pairs of addresses: the first is the
* address of an instruction that is allowed to fault, and the second is
* the address at which the program should continue. No registers are
* modified, so it is entirely up to the continuation code to figure out
* what to do.
*
* All the routines below use bits of fixup code that are out of line
* with the main instruction path. This means when everything is well,
* we don't even have to jump over them. Further, they do not intrude
* on our cache or tlb entries.
*/
struct exception_table_entry {
unsigned long insn, fixup;
};
#define copy_to_user(to, from, n) (memcpy((to), (from), (n)), 0) /* Returns 0 if exception not found and fixup otherwise. */
#define copy_from_user(to, from, n) (memcpy((to), (from), (n)), 0) extern unsigned long search_exception_table(unsigned long);
#define __copy_to_user(to, from, n) (copy_to_user((to), (from), (n))) #ifndef CONFIG_MMU
#define __copy_from_user(to, from, n) (copy_from_user((to), (from), (n)))
#define __copy_to_user_inatomic(to, from, n) \
(__copy_to_user((to), (from), (n)))
#define __copy_from_user_inatomic(to, from, n) \
(__copy_from_user((to), (from), (n)))
static inline unsigned long clear_user(void *addr, unsigned long size) /* Check against bounds of physical memory */
static inline int ___range_ok(unsigned long addr, unsigned long size)
{ {
if (access_ok(VERIFY_WRITE, addr, size)) return ((addr < memory_start) ||
size = __clear_user(addr, size); ((addr + size) > memory_end));
return size;
} }
/* Returns 0 if exception not found and fixup otherwise. */ #define __range_ok(addr, size) \
extern unsigned long search_exception_table(unsigned long); ___range_ok((unsigned long)(addr), (unsigned long)(size))
extern long strncpy_from_user(char *dst, const char *src, long count); #define access_ok(type, addr, size) (__range_ok((addr), (size)) == 0)
extern long strnlen_user(const char *src, long count);
#else /* CONFIG_MMU */ #else
/* /*
* Address is valid if: * Address is valid if:
...@@ -129,24 +101,88 @@ extern long strnlen_user(const char *src, long count); ...@@ -129,24 +101,88 @@ extern long strnlen_user(const char *src, long count);
/* || printk("access_ok failed for %s at 0x%08lx (size %d), seg 0x%08x\n", /* || printk("access_ok failed for %s at 0x%08lx (size %d), seg 0x%08x\n",
type?"WRITE":"READ",addr,size,get_fs().seg)) */ type?"WRITE":"READ",addr,size,get_fs().seg)) */
/* #endif
* All the __XXX versions macros/functions below do not perform
* access checking. It is assumed that the necessary checks have been
* already performed before the finction (macro) is called.
*/
#define get_user(x, ptr) \ #ifdef CONFIG_MMU
({ \ # define __FIXUP_SECTION ".section .fixup,\"ax\"\n"
access_ok(VERIFY_READ, (ptr), sizeof(*(ptr))) \ # define __EX_TABLE_SECTION ".section __ex_table,\"a\"\n"
? __get_user((x), (ptr)) : -EFAULT; \ #else
}) # define __FIXUP_SECTION ".section .discard,\"ax\"\n"
# define __EX_TABLE_SECTION ".section .discard,\"a\"\n"
#endif
#define put_user(x, ptr) \ extern unsigned long __copy_tofrom_user(void __user *to,
({ \ const void __user *from, unsigned long size);
access_ok(VERIFY_WRITE, (ptr), sizeof(*(ptr))) \
? __put_user((x), (ptr)) : -EFAULT; \ /* Return: number of not copied bytes, i.e. 0 if OK or non-zero if fail. */
static inline unsigned long __must_check __clear_user(void __user *to,
unsigned long n)
{
/* normal memset with two words to __ex_table */
__asm__ __volatile__ ( \
"1: sb r0, %2, r0;" \
" addik %0, %0, -1;" \
" bneid %0, 1b;" \
" addik %2, %2, 1;" \
"2: " \
__EX_TABLE_SECTION \
".word 1b,2b;" \
".previous;" \
: "=r"(n) \
: "0"(n), "r"(to)
);
return n;
}
static inline unsigned long __must_check clear_user(void __user *to,
unsigned long n)
{
might_sleep();
if (unlikely(!access_ok(VERIFY_WRITE, to, n)))
return n;
return __clear_user(to, n);
}
/* put_user and get_user macros */
extern long __user_bad(void);
#define __get_user_asm(insn, __gu_ptr, __gu_val, __gu_err) \
({ \
__asm__ __volatile__ ( \
"1:" insn " %1, %2, r0;" \
" addk %0, r0, r0;" \
"2: " \
__FIXUP_SECTION \
"3: brid 2b;" \
" addik %0, r0, %3;" \
".previous;" \
__EX_TABLE_SECTION \
".word 1b,3b;" \
".previous;" \
: "=&r"(__gu_err), "=r"(__gu_val) \
: "r"(__gu_ptr), "i"(-EFAULT) \
); \
}) })
/**
* get_user: - Get a simple variable from user space.
* @x: Variable to store result.
* @ptr: Source address, in user space.
*
* Context: User context only. This function may sleep.
*
* This macro copies a single simple variable from user space to kernel
* space. It supports simple types like char and int, but not larger
* data types like structures or arrays.
*
* @ptr must have pointer-to-simple-variable type, and the result of
* dereferencing @ptr must be assignable to @x without a cast.
*
* Returns zero on success, or -EFAULT on error.
* On error, the variable @x is set to zero.
*/
#define __get_user(x, ptr) \ #define __get_user(x, ptr) \
({ \ ({ \
unsigned long __gu_val; \ unsigned long __gu_val; \
...@@ -163,30 +199,74 @@ extern long strnlen_user(const char *src, long count); ...@@ -163,30 +199,74 @@ extern long strnlen_user(const char *src, long count);
__get_user_asm("lw", (ptr), __gu_val, __gu_err); \ __get_user_asm("lw", (ptr), __gu_val, __gu_err); \
break; \ break; \
default: \ default: \
__gu_val = 0; __gu_err = -EINVAL; \ /* __gu_val = 0; __gu_err = -EINVAL;*/ __gu_err = __user_bad();\
} \ } \
x = (__typeof__(*(ptr))) __gu_val; \ x = (__typeof__(*(ptr))) __gu_val; \
__gu_err; \ __gu_err; \
}) })
#define __get_user_asm(insn, __gu_ptr, __gu_val, __gu_err) \
#define get_user(x, ptr) \
({ \ ({ \
__asm__ __volatile__ ( \ access_ok(VERIFY_READ, (ptr), sizeof(*(ptr))) \
"1:" insn " %1, %2, r0; \ ? __get_user((x), (ptr)) : -EFAULT; \
addk %0, r0, r0; \ })
2: \
.section .fixup,\"ax\"; \ #define __put_user_asm(insn, __gu_ptr, __gu_val, __gu_err) \
3: brid 2b; \ ({ \
addik %0, r0, %3; \ __asm__ __volatile__ ( \
.previous; \ "1:" insn " %1, %2, r0;" \
.section __ex_table,\"a\"; \ " addk %0, r0, r0;" \
.word 1b,3b; \ "2: " \
.previous;" \ __FIXUP_SECTION \
: "=r"(__gu_err), "=r"(__gu_val) \ "3: brid 2b;" \
: "r"(__gu_ptr), "i"(-EFAULT) \ " addik %0, r0, %3;" \
); \ ".previous;" \
__EX_TABLE_SECTION \
".word 1b,3b;" \
".previous;" \
: "=&r"(__gu_err) \
: "r"(__gu_val), "r"(__gu_ptr), "i"(-EFAULT) \
); \
}) })
#define __put_user_asm_8(__gu_ptr, __gu_val, __gu_err) \
({ \
__asm__ __volatile__ (" lwi %0, %1, 0;" \
"1: swi %0, %2, 0;" \
" lwi %0, %1, 4;" \
"2: swi %0, %2, 4;" \
" addk %0, r0, r0;" \
"3: " \
__FIXUP_SECTION \
"4: brid 3b;" \
" addik %0, r0, %3;" \
".previous;" \
__EX_TABLE_SECTION \
".word 1b,4b,2b,4b;" \
".previous;" \
: "=&r"(__gu_err) \
: "r"(&__gu_val), "r"(__gu_ptr), "i"(-EFAULT) \
); \
})
/**
* put_user: - Write a simple value into user space.
* @x: Value to copy to user space.
* @ptr: Destination address, in user space.
*
* Context: User context only. This function may sleep.
*
* This macro copies a single simple value from kernel space to user
* space. It supports simple types like char and int, but not larger
* data types like structures or arrays.
*
* @ptr must have pointer-to-simple-variable type, and @x must be assignable
* to the result of dereferencing @ptr.
*
* Returns zero on success, or -EFAULT on error.
*/
#define __put_user(x, ptr) \ #define __put_user(x, ptr) \
({ \ ({ \
__typeof__(*(ptr)) volatile __gu_val = (x); \ __typeof__(*(ptr)) volatile __gu_val = (x); \
...@@ -195,7 +275,7 @@ extern long strnlen_user(const char *src, long count); ...@@ -195,7 +275,7 @@ extern long strnlen_user(const char *src, long count);
case 1: \ case 1: \
__put_user_asm("sb", (ptr), __gu_val, __gu_err); \ __put_user_asm("sb", (ptr), __gu_val, __gu_err); \
break; \ break; \
case 2: \ case 2: \
__put_user_asm("sh", (ptr), __gu_val, __gu_err); \ __put_user_asm("sh", (ptr), __gu_val, __gu_err); \
break; \ break; \
case 4: \ case 4: \
...@@ -205,121 +285,82 @@ extern long strnlen_user(const char *src, long count); ...@@ -205,121 +285,82 @@ extern long strnlen_user(const char *src, long count);
__put_user_asm_8((ptr), __gu_val, __gu_err); \ __put_user_asm_8((ptr), __gu_val, __gu_err); \
break; \ break; \
default: \ default: \
__gu_err = -EINVAL; \ /*__gu_err = -EINVAL;*/ __gu_err = __user_bad(); \
} \ } \
__gu_err; \ __gu_err; \
}) })
#define __put_user_asm_8(__gu_ptr, __gu_val, __gu_err) \ #ifndef CONFIG_MMU
({ \
__asm__ __volatile__ (" lwi %0, %1, 0; \
1: swi %0, %2, 0; \
lwi %0, %1, 4; \
2: swi %0, %2, 4; \
addk %0,r0,r0; \
3: \
.section .fixup,\"ax\"; \
4: brid 3b; \
addik %0, r0, %3; \
.previous; \
.section __ex_table,\"a\"; \
.word 1b,4b,2b,4b; \
.previous;" \
: "=&r"(__gu_err) \
: "r"(&__gu_val), \
"r"(__gu_ptr), "i"(-EFAULT) \
); \
})
#define __put_user_asm(insn, __gu_ptr, __gu_val, __gu_err) \ #define put_user(x, ptr) __put_user((x), (ptr))
({ \
__asm__ __volatile__ ( \
"1:" insn " %1, %2, r0; \
addk %0, r0, r0; \
2: \
.section .fixup,\"ax\"; \
3: brid 2b; \
addik %0, r0, %3; \
.previous; \
.section __ex_table,\"a\"; \
.word 1b,3b; \
.previous;" \
: "=r"(__gu_err) \
: "r"(__gu_val), "r"(__gu_ptr), "i"(-EFAULT) \
); \
})
/* #else /* CONFIG_MMU */
* Return: number of not copied bytes, i.e. 0 if OK or non-zero if fail.
*/
static inline int clear_user(char *to, int size)
{
if (size && access_ok(VERIFY_WRITE, to, size)) {
__asm__ __volatile__ (" \
1: \
sb r0, %2, r0; \
addik %0, %0, -1; \
bneid %0, 1b; \
addik %2, %2, 1; \
2: \
.section __ex_table,\"a\"; \
.word 1b,2b; \
.section .text;" \
: "=r"(size) \
: "0"(size), "r"(to)
);
}
return size;
}
#define __copy_from_user(to, from, n) copy_from_user((to), (from), (n)) #define put_user(x, ptr) \
({ \
access_ok(VERIFY_WRITE, (ptr), sizeof(*(ptr))) \
? __put_user((x), (ptr)) : -EFAULT; \
})
#endif /* CONFIG_MMU */
/* copy_to_from_user */
#define __copy_from_user(to, from, n) \
__copy_tofrom_user((__force void __user *)(to), \
(void __user *)(from), (n))
#define __copy_from_user_inatomic(to, from, n) \ #define __copy_from_user_inatomic(to, from, n) \
copy_from_user((to), (from), (n)) copy_from_user((to), (from), (n))
#define copy_to_user(to, from, n) \ static inline long copy_from_user(void *to,
(access_ok(VERIFY_WRITE, (to), (n)) ? \ const void __user *from, unsigned long n)
__copy_tofrom_user((void __user *)(to), \ {
(__force const void __user *)(from), (n)) \ might_sleep();
: -EFAULT) if (access_ok(VERIFY_READ, from, n))
return __copy_from_user(to, from, n);
return n;
}
#define __copy_to_user(to, from, n) copy_to_user((to), (from), (n)) #define __copy_to_user(to, from, n) \
__copy_tofrom_user((void __user *)(to), \
(__force const void __user *)(from), (n))
#define __copy_to_user_inatomic(to, from, n) copy_to_user((to), (from), (n)) #define __copy_to_user_inatomic(to, from, n) copy_to_user((to), (from), (n))
#define copy_from_user(to, from, n) \ static inline long copy_to_user(void __user *to,
(access_ok(VERIFY_READ, (from), (n)) ? \ const void *from, unsigned long n)
__copy_tofrom_user((__force void __user *)(to), \ {
(void __user *)(from), (n)) \ might_sleep();
: -EFAULT) if (access_ok(VERIFY_WRITE, to, n))
return __copy_to_user(to, from, n);
return n;
}
/*
* Copy a null terminated string from userspace.
*/
extern int __strncpy_user(char *to, const char __user *from, int len); extern int __strncpy_user(char *to, const char __user *from, int len);
extern int __strnlen_user(const char __user *sstr, int len);
#define strncpy_from_user(to, from, len) \ #define __strncpy_from_user __strncpy_user
(access_ok(VERIFY_READ, from, 1) ? \
__strncpy_user(to, from, len) : -EFAULT)
#define strnlen_user(str, len) \
(access_ok(VERIFY_READ, str, 1) ? __strnlen_user(str, len) : 0)
#endif /* CONFIG_MMU */ static inline long
strncpy_from_user(char *dst, const char __user *src, long count)
extern unsigned long __copy_tofrom_user(void __user *to, {
const void __user *from, unsigned long size); if (!access_ok(VERIFY_READ, src, 1))
return -EFAULT;
return __strncpy_from_user(dst, src, count);
}
/* /*
* The exception table consists of pairs of addresses: the first is the * Return the size of a string (including the ending 0)
* address of an instruction that is allowed to fault, and the second is
* the address at which the program should continue. No registers are
* modified, so it is entirely up to the continuation code to figure out
* what to do.
* *
* All the routines below use bits of fixup code that are out of line * Return 0 on exception, a value greater than N if too long
* with the main instruction path. This means when everything is well,
* we don't even have to jump over them. Further, they do not intrude
* on our cache or tlb entries.
*/ */
struct exception_table_entry { extern int __strnlen_user(const char __user *sstr, int len);
unsigned long insn, fixup;
}; static inline long strnlen_user(const char __user *src, long n)
{
if (!access_ok(VERIFY_READ, src, 1))
return 0;
return __strnlen_user(src, n);
}
#endif /* __ASSEMBLY__ */ #endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */ #endif /* __KERNEL__ */
......
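As a usage-level illustration of the accessors reworked above (a hypothetical driver-style sketch, not code from this series): get_user()/put_user() return 0 or -EFAULT, while copy_from_user()/copy_to_user() return the number of bytes left uncopied, so any non-zero result means failure:

	#include <linux/uaccess.h>
	#include <linux/errno.h>
	#include <linux/types.h>

	struct demo_req {			/* hypothetical request layout */
		u32 cmd;
		u32 arg;
	};

	static long demo_handler(void __user *uptr)
	{
		struct demo_req req;

		/* copy_from_user() returns the number of bytes NOT copied */
		if (copy_from_user(&req, uptr, sizeof(req)))
			return -EFAULT;

		/* put_user() returns 0 on success or -EFAULT */
		return put_user(req.cmd + req.arg, (u32 __user *)uptr);
	}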
@@ -37,7 +37,7 @@ static inline void __dma_sync_page(unsigned long paddr, unsigned long offset,
 static unsigned long get_dma_direct_offset(struct device *dev)
 {
-	if (dev)
+	if (likely(dev))
 		return (unsigned long)dev->archdata.dma_data;

 	return PCI_DRAM_OFFSET; /* FIXME Not sure if is correct */
...
@@ -51,6 +51,12 @@ swapper_pg_dir:
 	.text
 ENTRY(_start)
+#if CONFIG_KERNEL_BASE_ADDR == 0
+	brai	TOPHYS(real_start)
+	.org	0x100
+real_start:
+#endif
+
 	mfs	r1, rmsr
 	andi	r1, r1, ~2
 	mts	rmsr, r1
@@ -99,8 +105,8 @@ no_fdt_arg:
 	tophys(r4,r4)			/* convert to phys address */
 	ori	r3, r0, COMMAND_LINE_SIZE - 1 /* number of loops */
 _copy_command_line:
-	lbu	r2, r5, r6 /* r7=r5+r6 - r5 contain pointer to command line */
-	sb	r2, r4, r6 /* addr[r4+r6]= r7*/
+	lbu	r2, r5, r6 /* r2=r5+r6 - r5 contain pointer to command line */
+	sb	r2, r4, r6 /* addr[r4+r6]= r2*/
 	addik	r6, r6, 1 /* increment counting */
 	bgtid	r3, _copy_command_line /* loop for all entries */
 	addik	r3, r3, -1 /* descrement loop */
@@ -128,7 +134,7 @@ _copy_bram:
 	 * virtual to physical.
 	 */
 	nop
-	addik	r3, r0, 63 /* Invalidate all TLB entries */
+	addik	r3, r0, MICROBLAZE_TLB_SIZE -1 /* Invalidate all TLB entries */
 _invalidate:
 	mts	rtlbx, r3
 	mts	rtlbhi, r0 /* flush: ensure V is clear */
...
...@@ -313,13 +313,13 @@ _hw_exception_handler: ...@@ -313,13 +313,13 @@ _hw_exception_handler:
mfs r5, rmsr; mfs r5, rmsr;
nop nop
swi r5, r1, 0; swi r5, r1, 0;
mfs r3, resr mfs r4, resr
nop nop
mfs r4, rear; mfs r3, rear;
nop nop
#ifndef CONFIG_MMU #ifndef CONFIG_MMU
andi r5, r3, 0x1000; /* Check ESR[DS] */ andi r5, r4, 0x1000; /* Check ESR[DS] */
beqi r5, not_in_delay_slot; /* Branch if ESR[DS] not set */ beqi r5, not_in_delay_slot; /* Branch if ESR[DS] not set */
mfs r17, rbtr; /* ESR[DS] set - return address in BTR */ mfs r17, rbtr; /* ESR[DS] set - return address in BTR */
nop nop
...@@ -327,13 +327,14 @@ not_in_delay_slot: ...@@ -327,13 +327,14 @@ not_in_delay_slot:
swi r17, r1, PT_R17 swi r17, r1, PT_R17
#endif #endif
andi r5, r3, 0x1F; /* Extract ESR[EXC] */ andi r5, r4, 0x1F; /* Extract ESR[EXC] */
#ifdef CONFIG_MMU #ifdef CONFIG_MMU
/* Calculate exception vector offset = r5 << 2 */ /* Calculate exception vector offset = r5 << 2 */
addk r6, r5, r5; /* << 1 */ addk r6, r5, r5; /* << 1 */
addk r6, r6, r6; /* << 2 */ addk r6, r6, r6; /* << 2 */
#ifdef DEBUG
/* counting which exception happen */ /* counting which exception happen */
lwi r5, r0, 0x200 + TOPHYS(r0_ram) lwi r5, r0, 0x200 + TOPHYS(r0_ram)
addi r5, r5, 1 addi r5, r5, 1
...@@ -341,6 +342,7 @@ not_in_delay_slot: ...@@ -341,6 +342,7 @@ not_in_delay_slot:
lwi r5, r6, 0x200 + TOPHYS(r0_ram) lwi r5, r6, 0x200 + TOPHYS(r0_ram)
addi r5, r5, 1 addi r5, r5, 1
swi r5, r6, 0x200 + TOPHYS(r0_ram) swi r5, r6, 0x200 + TOPHYS(r0_ram)
#endif
/* end */ /* end */
/* Load the HW Exception vector */ /* Load the HW Exception vector */
lwi r6, r6, TOPHYS(_MB_HW_ExceptionVectorTable) lwi r6, r6, TOPHYS(_MB_HW_ExceptionVectorTable)
...@@ -376,7 +378,7 @@ handle_other_ex: /* Handle Other exceptions here */ ...@@ -376,7 +378,7 @@ handle_other_ex: /* Handle Other exceptions here */
swi r18, r1, PT_R18 swi r18, r1, PT_R18
or r5, r1, r0 or r5, r1, r0
andi r6, r3, 0x1F; /* Load ESR[EC] */ andi r6, r4, 0x1F; /* Load ESR[EC] */
lwi r7, r0, PER_CPU(KM) /* MS: saving current kernel mode to regs */ lwi r7, r0, PER_CPU(KM) /* MS: saving current kernel mode to regs */
swi r7, r1, PT_MODE swi r7, r1, PT_MODE
mfs r7, rfsr mfs r7, rfsr
...@@ -426,11 +428,11 @@ handle_other_ex: /* Handle Other exceptions here */ ...@@ -426,11 +428,11 @@ handle_other_ex: /* Handle Other exceptions here */
*/ */
handle_unaligned_ex: handle_unaligned_ex:
/* Working registers already saved: R3, R4, R5, R6 /* Working registers already saved: R3, R4, R5, R6
* R3 = ESR * R4 = ESR
* R4 = EAR * R3 = EAR
*/ */
#ifdef CONFIG_MMU #ifdef CONFIG_MMU
andi r6, r3, 0x1000 /* Check ESR[DS] */ andi r6, r4, 0x1000 /* Check ESR[DS] */
beqi r6, _no_delayslot /* Branch if ESR[DS] not set */ beqi r6, _no_delayslot /* Branch if ESR[DS] not set */
mfs r17, rbtr; /* ESR[DS] set - return address in BTR */ mfs r17, rbtr; /* ESR[DS] set - return address in BTR */
nop nop
...@@ -439,7 +441,7 @@ _no_delayslot: ...@@ -439,7 +441,7 @@ _no_delayslot:
RESTORE_STATE; RESTORE_STATE;
bri unaligned_data_trap bri unaligned_data_trap
#endif #endif
andi r6, r3, 0x3E0; /* Mask and extract the register operand */ andi r6, r4, 0x3E0; /* Mask and extract the register operand */
srl r6, r6; /* r6 >> 5 */ srl r6, r6; /* r6 >> 5 */
srl r6, r6; srl r6, r6;
srl r6, r6; srl r6, r6;
...@@ -448,33 +450,33 @@ _no_delayslot: ...@@ -448,33 +450,33 @@ _no_delayslot:
/* Store the register operand in a temporary location */ /* Store the register operand in a temporary location */
sbi r6, r0, TOPHYS(ex_reg_op); sbi r6, r0, TOPHYS(ex_reg_op);
andi r6, r3, 0x400; /* Extract ESR[S] */ andi r6, r4, 0x400; /* Extract ESR[S] */
bnei r6, ex_sw; bnei r6, ex_sw;
ex_lw: ex_lw:
andi r6, r3, 0x800; /* Extract ESR[W] */ andi r6, r4, 0x800; /* Extract ESR[W] */
beqi r6, ex_lhw; beqi r6, ex_lhw;
lbui r5, r4, 0; /* Exception address in r4 */ lbui r5, r3, 0; /* Exception address in r3 */
/* Load a word, byte-by-byte from destination address /* Load a word, byte-by-byte from destination address
and save it in tmp space */ and save it in tmp space */
sbi r5, r0, TOPHYS(ex_tmp_data_loc_0); sbi r5, r0, TOPHYS(ex_tmp_data_loc_0);
lbui r5, r4, 1; lbui r5, r3, 1;
sbi r5, r0, TOPHYS(ex_tmp_data_loc_1); sbi r5, r0, TOPHYS(ex_tmp_data_loc_1);
lbui r5, r4, 2; lbui r5, r3, 2;
sbi r5, r0, TOPHYS(ex_tmp_data_loc_2); sbi r5, r0, TOPHYS(ex_tmp_data_loc_2);
lbui r5, r4, 3; lbui r5, r3, 3;
sbi r5, r0, TOPHYS(ex_tmp_data_loc_3); sbi r5, r0, TOPHYS(ex_tmp_data_loc_3);
/* Get the destination register value into r3 */ /* Get the destination register value into r4 */
lwi r3, r0, TOPHYS(ex_tmp_data_loc_0); lwi r4, r0, TOPHYS(ex_tmp_data_loc_0);
bri ex_lw_tail; bri ex_lw_tail;
ex_lhw: ex_lhw:
lbui r5, r4, 0; /* Exception address in r4 */ lbui r5, r3, 0; /* Exception address in r3 */
/* Load a half-word, byte-by-byte from destination /* Load a half-word, byte-by-byte from destination
address and save it in tmp space */ address and save it in tmp space */
sbi r5, r0, TOPHYS(ex_tmp_data_loc_0); sbi r5, r0, TOPHYS(ex_tmp_data_loc_0);
lbui r5, r4, 1; lbui r5, r3, 1;
sbi r5, r0, TOPHYS(ex_tmp_data_loc_1); sbi r5, r0, TOPHYS(ex_tmp_data_loc_1);
/* Get the destination register value into r3 */ /* Get the destination register value into r4 */
lhui r3, r0, TOPHYS(ex_tmp_data_loc_0); lhui r4, r0, TOPHYS(ex_tmp_data_loc_0);
ex_lw_tail: ex_lw_tail:
/* Get the destination register number into r5 */ /* Get the destination register number into r5 */
lbui r5, r0, TOPHYS(ex_reg_op); lbui r5, r0, TOPHYS(ex_reg_op);
...@@ -502,25 +504,25 @@ ex_sw_tail: ...@@ -502,25 +504,25 @@ ex_sw_tail:
andi r6, r6, 0x800; /* Extract ESR[W] */ andi r6, r6, 0x800; /* Extract ESR[W] */
beqi r6, ex_shw; beqi r6, ex_shw;
/* Get the word - delay slot */ /* Get the word - delay slot */
swi r3, r0, TOPHYS(ex_tmp_data_loc_0); swi r4, r0, TOPHYS(ex_tmp_data_loc_0);
/* Store the word, byte-by-byte into destination address */ /* Store the word, byte-by-byte into destination address */
lbui r3, r0, TOPHYS(ex_tmp_data_loc_0); lbui r4, r0, TOPHYS(ex_tmp_data_loc_0);
sbi r3, r4, 0; sbi r4, r3, 0;
lbui r3, r0, TOPHYS(ex_tmp_data_loc_1); lbui r4, r0, TOPHYS(ex_tmp_data_loc_1);
sbi r3, r4, 1; sbi r4, r3, 1;
lbui r3, r0, TOPHYS(ex_tmp_data_loc_2); lbui r4, r0, TOPHYS(ex_tmp_data_loc_2);
sbi r3, r4, 2; sbi r4, r3, 2;
lbui r3, r0, TOPHYS(ex_tmp_data_loc_3); lbui r4, r0, TOPHYS(ex_tmp_data_loc_3);
sbi r3, r4, 3; sbi r4, r3, 3;
bri ex_handler_done; bri ex_handler_done;
ex_shw: ex_shw:
/* Store the lower half-word, byte-by-byte into destination address */ /* Store the lower half-word, byte-by-byte into destination address */
swi r3, r0, TOPHYS(ex_tmp_data_loc_0); swi r4, r0, TOPHYS(ex_tmp_data_loc_0);
lbui r3, r0, TOPHYS(ex_tmp_data_loc_2); lbui r4, r0, TOPHYS(ex_tmp_data_loc_2);
sbi r3, r4, 0; sbi r4, r3, 0;
lbui r3, r0, TOPHYS(ex_tmp_data_loc_3); lbui r4, r0, TOPHYS(ex_tmp_data_loc_3);
sbi r3, r4, 1; sbi r4, r3, 1;
ex_sw_end: /* Exception handling of store word, ends. */ ex_sw_end: /* Exception handling of store word, ends. */
ex_handler_done: ex_handler_done:
...@@ -560,21 +562,16 @@ ex_handler_done: ...@@ -560,21 +562,16 @@ ex_handler_done:
*/ */
mfs r11, rpid mfs r11, rpid
nop nop
bri 4
mfs r3, rear /* Get faulting address */
nop
/* If we are faulting a kernel address, we have to use the /* If we are faulting a kernel address, we have to use the
* kernel page tables. * kernel page tables.
*/ */
ori r4, r0, CONFIG_KERNEL_START ori r5, r0, CONFIG_KERNEL_START
cmpu r4, r3, r4 cmpu r5, r3, r5
bgti r4, ex3 bgti r5, ex3
/* First, check if it was a zone fault (which means a user /* First, check if it was a zone fault (which means a user
* tried to access a kernel or read-protected page - always * tried to access a kernel or read-protected page - always
* a SEGV). All other faults here must be stores, so no * a SEGV). All other faults here must be stores, so no
* need to check ESR_S as well. */ * need to check ESR_S as well. */
mfs r4, resr
nop
andi r4, r4, 0x800 /* ESR_Z - zone protection */ andi r4, r4, 0x800 /* ESR_Z - zone protection */
bnei r4, ex2 bnei r4, ex2
...@@ -589,8 +586,6 @@ ex_handler_done: ...@@ -589,8 +586,6 @@ ex_handler_done:
* tried to access a kernel or read-protected page - always * tried to access a kernel or read-protected page - always
* a SEGV). All other faults here must be stores, so no * a SEGV). All other faults here must be stores, so no
* need to check ESR_S as well. */ * need to check ESR_S as well. */
mfs r4, resr
nop
andi r4, r4, 0x800 /* ESR_Z */ andi r4, r4, 0x800 /* ESR_Z */
bnei r4, ex2 bnei r4, ex2
/* get current task address */ /* get current task address */
...@@ -665,8 +660,6 @@ ex_handler_done: ...@@ -665,8 +660,6 @@ ex_handler_done:
* R3 = ESR * R3 = ESR
*/ */
mfs r3, rear /* Get faulting address */
nop
RESTORE_STATE; RESTORE_STATE;
bri page_fault_instr_trap bri page_fault_instr_trap
...@@ -677,18 +670,15 @@ ex_handler_done: ...@@ -677,18 +670,15 @@ ex_handler_done:
*/ */
handle_data_tlb_miss_exception: handle_data_tlb_miss_exception:
/* Working registers already saved: R3, R4, R5, R6 /* Working registers already saved: R3, R4, R5, R6
* R3 = ESR * R3 = EAR, R4 = ESR
*/ */
mfs r11, rpid mfs r11, rpid
nop nop
bri 4
mfs r3, rear /* Get faulting address */
nop
/* If we are faulting a kernel address, we have to use the /* If we are faulting a kernel address, we have to use the
* kernel page tables. */ * kernel page tables. */
ori r4, r0, CONFIG_KERNEL_START ori r6, r0, CONFIG_KERNEL_START
cmpu r4, r3, r4 cmpu r4, r3, r6
bgti r4, ex5 bgti r4, ex5
ori r4, r0, swapper_pg_dir ori r4, r0, swapper_pg_dir
mts rpid, r0 /* TLB will have 0 TID */ mts rpid, r0 /* TLB will have 0 TID */
...@@ -731,9 +721,8 @@ ex_handler_done: ...@@ -731,9 +721,8 @@ ex_handler_done:
* Many of these bits are software only. Bits we don't set * Many of these bits are software only. Bits we don't set
* here we (properly should) assume have the appropriate value. * here we (properly should) assume have the appropriate value.
*/ */
brid finish_tlb_load
andni r4, r4, 0x0ce2 /* Make sure 20, 21 are zero */ andni r4, r4, 0x0ce2 /* Make sure 20, 21 are zero */
bri finish_tlb_load
ex7: ex7:
/* The bailout. Restore registers to pre-exception conditions /* The bailout. Restore registers to pre-exception conditions
* and call the heavyweights to help us out. * and call the heavyweights to help us out.
...@@ -754,9 +743,6 @@ ex_handler_done: ...@@ -754,9 +743,6 @@ ex_handler_done:
*/ */
mfs r11, rpid mfs r11, rpid
nop nop
bri 4
mfs r3, rear /* Get faulting address */
nop
/* If we are faulting a kernel address, we have to use the /* If we are faulting a kernel address, we have to use the
* kernel page tables. * kernel page tables.
...@@ -792,7 +778,7 @@ ex_handler_done: ...@@ -792,7 +778,7 @@ ex_handler_done:
lwi r4, r5, 0 /* Get Linux PTE */ lwi r4, r5, 0 /* Get Linux PTE */
andi r6, r4, _PAGE_PRESENT andi r6, r4, _PAGE_PRESENT
beqi r6, ex7 beqi r6, ex10
ori r4, r4, _PAGE_ACCESSED ori r4, r4, _PAGE_ACCESSED
swi r4, r5, 0 swi r4, r5, 0
...@@ -805,9 +791,8 @@ ex_handler_done: ...@@ -805,9 +791,8 @@ ex_handler_done:
* Many of these bits are software only. Bits we don't set * Many of these bits are software only. Bits we don't set
* here we (properly should) assume have the appropriate value. * here we (properly should) assume have the appropriate value.
*/ */
brid finish_tlb_load
andni r4, r4, 0x0ce2 /* Make sure 20, 21 are zero */ andni r4, r4, 0x0ce2 /* Make sure 20, 21 are zero */
bri finish_tlb_load
ex10: ex10:
/* The bailout. Restore registers to pre-exception conditions /* The bailout. Restore registers to pre-exception conditions
* and call the heavyweights to help us out. * and call the heavyweights to help us out.
...@@ -837,9 +822,9 @@ ex_handler_done: ...@@ -837,9 +822,9 @@ ex_handler_done:
andi r5, r5, (MICROBLAZE_TLB_SIZE-1) andi r5, r5, (MICROBLAZE_TLB_SIZE-1)
ori r6, r0, 1 ori r6, r0, 1
cmp r31, r5, r6 cmp r31, r5, r6
blti r31, sem blti r31, ex12
addik r5, r6, 1 addik r5, r6, 1
sem: ex12:
/* MS: save back current TLB index */ /* MS: save back current TLB index */
swi r5, r0, TOPHYS(tlb_index) swi r5, r0, TOPHYS(tlb_index)
...@@ -859,7 +844,6 @@ ex_handler_done: ...@@ -859,7 +844,6 @@ ex_handler_done:
nop nop
/* Done...restore registers and get out of here. */ /* Done...restore registers and get out of here. */
ex12:
mts rpid, r11 mts rpid, r11
nop nop
bri 4 bri 4
......
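In C terms, the unaligned-access path above (ESR[W] selects word vs. half-word, ESR[S] selects store vs. load, and the operand at the faulting EAR is assembled or scattered one byte at a time) amounts to roughly the following; this is a hedged pseudo-C summary of the assembly, not code from the tree, and it assumes the usual big-endian MicroBlaze byte order:

	#include <stdint.h>

	/* Pseudo-C summary of the fixup done in handle_unaligned_ex above:
	 * reassemble or scatter the operand one byte at a time so the hardware
	 * never issues an unaligned word/half-word access.  Byte 0 is the most
	 * significant byte on big-endian MicroBlaze. */
	static uint32_t unaligned_load(const uint8_t *ear, int word)
	{
		if (word)				/* ESR[W] set: 32-bit access */
			return ((uint32_t)ear[0] << 24) | ((uint32_t)ear[1] << 16) |
			       ((uint32_t)ear[2] << 8)  |  (uint32_t)ear[3];
		return ((uint32_t)ear[0] << 8) | (uint32_t)ear[1];	/* half-word */
	}

	static void unaligned_store(uint8_t *ear, uint32_t val, int word)
	{
		if (word) {
			ear[0] = val >> 24; ear[1] = val >> 16;
			ear[2] = val >> 8;  ear[3] = val;
		} else {				/* lower half-word only */
			ear[0] = val >> 8;  ear[1] = val;
		}
	}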
...@@ -26,9 +26,10 @@ ...@@ -26,9 +26,10 @@
* We avoid flushing the pinned 0, 1 and possibly 2 entries. * We avoid flushing the pinned 0, 1 and possibly 2 entries.
*/ */
.globl _tlbia; .globl _tlbia;
.type _tlbia, @function
.align 4; .align 4;
_tlbia: _tlbia:
addik r12, r0, 63 /* flush all entries (63 - 3) */ addik r12, r0, MICROBLAZE_TLB_SIZE - 1 /* flush all entries (63 - 3) */
/* isync */ /* isync */
_tlbia_1: _tlbia_1:
mts rtlbx, r12 mts rtlbx, r12
...@@ -41,11 +42,13 @@ _tlbia_1: ...@@ -41,11 +42,13 @@ _tlbia_1:
/* sync */ /* sync */
rtsd r15, 8 rtsd r15, 8
nop nop
.size _tlbia, . - _tlbia
/* /*
* Flush MMU TLB for a particular address (in r5) * Flush MMU TLB for a particular address (in r5)
*/ */
.globl _tlbie; .globl _tlbie;
.type _tlbie, @function
.align 4; .align 4;
_tlbie: _tlbie:
mts rtlbsx, r5 /* look up the address in TLB */ mts rtlbsx, r5 /* look up the address in TLB */
...@@ -59,17 +62,20 @@ _tlbie_1: ...@@ -59,17 +62,20 @@ _tlbie_1:
rtsd r15, 8 rtsd r15, 8
nop nop
.size _tlbie, . - _tlbie
/* /*
* Allocate TLB entry for early console * Allocate TLB entry for early console
*/ */
.globl early_console_reg_tlb_alloc; .globl early_console_reg_tlb_alloc;
.type early_console_reg_tlb_alloc, @function
.align 4; .align 4;
early_console_reg_tlb_alloc: early_console_reg_tlb_alloc:
/* /*
* Load a TLB entry for the UART, so that microblaze_progress() can use * Load a TLB entry for the UART, so that microblaze_progress() can use
* the UARTs nice and early. We use a 4k real==virtual mapping. * the UARTs nice and early. We use a 4k real==virtual mapping.
*/ */
ori r4, r0, 63 ori r4, r0, MICROBLAZE_TLB_SIZE - 1
mts rtlbx, r4 /* TLB slot 2 */ mts rtlbx, r4 /* TLB slot 2 */
or r4,r5,r0 or r4,r5,r0
...@@ -86,6 +92,8 @@ early_console_reg_tlb_alloc: ...@@ -86,6 +92,8 @@ early_console_reg_tlb_alloc:
rtsd r15, 8 rtsd r15, 8
nop nop
.size early_console_reg_tlb_alloc, . - early_console_reg_tlb_alloc
/* /*
* Copy a whole page (4096 bytes). * Copy a whole page (4096 bytes).
*/ */
...@@ -104,6 +112,7 @@ early_console_reg_tlb_alloc: ...@@ -104,6 +112,7 @@ early_console_reg_tlb_alloc:
#define DCACHE_LINE_BYTES (4 * 4) #define DCACHE_LINE_BYTES (4 * 4)
.globl copy_page; .globl copy_page;
.type copy_page, @function
.align 4; .align 4;
copy_page: copy_page:
ori r11, r0, (PAGE_SIZE/DCACHE_LINE_BYTES) - 1 ori r11, r0, (PAGE_SIZE/DCACHE_LINE_BYTES) - 1
...@@ -118,3 +127,5 @@ _copy_page_loop: ...@@ -118,3 +127,5 @@ _copy_page_loop:
addik r11, r11, -1 addik r11, r11, -1
rtsd r15, 8 rtsd r15, 8
nop nop
.size copy_page, . - copy_page
@@ -15,6 +15,7 @@
 #include <linux/bitops.h>
 #include <asm/system.h>
 #include <asm/pgalloc.h>
+#include <asm/uaccess.h> /* for USER_DS macros */
 #include <asm/cacheflush.h>

 void show_regs(struct pt_regs *regs)
@@ -74,7 +75,10 @@ __setup("hlt", hlt_setup);

 void default_idle(void)
 {
-	if (!hlt_counter) {
+	if (likely(hlt_counter)) {
+		while (!need_resched())
+			cpu_relax();
+	} else {
 		clear_thread_flag(TIF_POLLING_NRFLAG);
 		smp_mb__after_clear_bit();
 		local_irq_disable();
@@ -82,9 +86,7 @@ void default_idle(void)
 		cpu_sleep();
 		local_irq_enable();
 		set_thread_flag(TIF_POLLING_NRFLAG);
-	} else
-		while (!need_resched())
-			cpu_relax();
+	}
 }

 void cpu_idle(void)
...
...@@ -92,6 +92,12 @@ inline unsigned get_romfs_len(unsigned *addr) ...@@ -92,6 +92,12 @@ inline unsigned get_romfs_len(unsigned *addr)
} }
#endif /* CONFIG_MTD_UCLINUX_EBSS */ #endif /* CONFIG_MTD_UCLINUX_EBSS */
#if defined(CONFIG_EARLY_PRINTK) && defined(CONFIG_SERIAL_UARTLITE_CONSOLE)
#define eprintk early_printk
#else
#define eprintk printk
#endif
void __init machine_early_init(const char *cmdline, unsigned int ram, void __init machine_early_init(const char *cmdline, unsigned int ram,
unsigned int fdt, unsigned int msr) unsigned int fdt, unsigned int msr)
{ {
...@@ -139,32 +145,32 @@ void __init machine_early_init(const char *cmdline, unsigned int ram, ...@@ -139,32 +145,32 @@ void __init machine_early_init(const char *cmdline, unsigned int ram,
setup_early_printk(NULL); setup_early_printk(NULL);
#endif #endif
early_printk("Ramdisk addr 0x%08x, ", ram); eprintk("Ramdisk addr 0x%08x, ", ram);
if (fdt) if (fdt)
early_printk("FDT at 0x%08x\n", fdt); eprintk("FDT at 0x%08x\n", fdt);
else else
early_printk("Compiled-in FDT at 0x%08x\n", eprintk("Compiled-in FDT at 0x%08x\n",
(unsigned int)_fdt_start); (unsigned int)_fdt_start);
#ifdef CONFIG_MTD_UCLINUX #ifdef CONFIG_MTD_UCLINUX
early_printk("Found romfs @ 0x%08x (0x%08x)\n", eprintk("Found romfs @ 0x%08x (0x%08x)\n",
romfs_base, romfs_size); romfs_base, romfs_size);
early_printk("#### klimit %p ####\n", old_klimit); eprintk("#### klimit %p ####\n", old_klimit);
BUG_ON(romfs_size < 0); /* What else can we do? */ BUG_ON(romfs_size < 0); /* What else can we do? */
early_printk("Moved 0x%08x bytes from 0x%08x to 0x%08x\n", eprintk("Moved 0x%08x bytes from 0x%08x to 0x%08x\n",
romfs_size, romfs_base, (unsigned)&_ebss); romfs_size, romfs_base, (unsigned)&_ebss);
early_printk("New klimit: 0x%08x\n", (unsigned)klimit); eprintk("New klimit: 0x%08x\n", (unsigned)klimit);
#endif #endif
#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR #if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
if (msr) if (msr)
early_printk("!!!Your kernel has setup MSR instruction but " eprintk("!!!Your kernel has setup MSR instruction but "
"CPU don't have it %d\n", msr); "CPU don't have it %d\n", msr);
#else #else
if (!msr) if (!msr)
early_printk("!!!Your kernel not setup MSR instruction but " eprintk("!!!Your kernel not setup MSR instruction but "
"CPU have it %d\n", msr); "CPU have it %d\n", msr);
#endif #endif
......
@@ -22,13 +22,11 @@ void trap_init(void)
 	__enable_hw_exceptions();
 }

-static int kstack_depth_to_print = 24;
+static unsigned long kstack_depth_to_print = 24;

 static int __init kstack_setup(char *s)
 {
-	kstack_depth_to_print = strict_strtoul(s, 0, NULL);
-
-	return 1;
+	return !strict_strtoul(s, 0, &kstack_depth_to_print);
 }
 __setup("kstack=", kstack_setup);
...
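The rewritten kstack= parser leans on two conventions: strict_strtoul() returns 0 on success (non-zero on a parse error), and an __setup() handler returns non-zero once it has consumed the option, so negating the parse result does the right thing. A minimal sketch of the same pattern for a hypothetical boot option (illustrative only):

	#include <linux/kernel.h>
	#include <linux/init.h>

	static unsigned long demo_depth = 24;

	/* Same shape as kstack_setup() above: strict_strtoul() yields 0 on
	 * success, so !strict_strtoul(...) is 1 (option handled) exactly when
	 * parsing worked and the value landed in demo_depth. */
	static int __init demo_depth_setup(char *s)
	{
		return !strict_strtoul(s, 0, &demo_depth);
	}
	__setup("demo_depth=", demo_depth_setup);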
@@ -10,5 +10,4 @@ else
 lib-y += memcpy.o memmove.o
 endif

-lib-$(CONFIG_NO_MMU) += uaccess.o
-lib-$(CONFIG_MMU) += uaccess_old.o
+lib-y += uaccess_old.o
...@@ -30,8 +30,9 @@ ...@@ -30,8 +30,9 @@
*/ */
#include <linux/linkage.h> #include <linux/linkage.h>
.text
.globl memcpy .globl memcpy
.type memcpy, @function
.ent memcpy .ent memcpy
memcpy: memcpy:
...@@ -345,9 +346,11 @@ a_done: ...@@ -345,9 +346,11 @@ a_done:
rtsd r15, 8 rtsd r15, 8
nop nop
.size memcpy, . - memcpy
.end memcpy .end memcpy
/*----------------------------------------------------------------------------*/ /*----------------------------------------------------------------------------*/
.globl memmove .globl memmove
.type memmove, @function
.ent memmove .ent memmove
memmove: memmove:
...@@ -659,4 +662,5 @@ d_done: ...@@ -659,4 +662,5 @@ d_done:
rtsd r15, 8 rtsd r15, 8
nop nop
.size memmove, . - memmove
.end memmove .end memmove
@@ -53,7 +53,7 @@ void *memcpy(void *v_dst, const void *v_src, __kernel_size_t c)
 	const uint32_t *i_src;
 	uint32_t *i_dst;

-	if (c >= 4) {
+	if (likely(c >= 4)) {
 		unsigned  value, buf_hold;

 		/* Align the dstination to a word boundry. */
...
@@ -33,22 +33,23 @@
 #ifdef __HAVE_ARCH_MEMSET
 void *memset(void *v_src, int c, __kernel_size_t n)
 {
 	char *src = v_src;
 #ifdef CONFIG_OPT_LIB_FUNCTION
 	uint32_t *i_src;
-	uint32_t w32;
+	uint32_t w32 = 0;
 #endif
 	/* Truncate c to 8 bits */
 	c = (c & 0xFF);

 #ifdef CONFIG_OPT_LIB_FUNCTION
-	/* Make a repeating word out of it */
-	w32 = c;
-	w32 |= w32 << 8;
-	w32 |= w32 << 16;
+	if (unlikely(c)) {
+		/* Make a repeating word out of it */
+		w32 = c;
+		w32 |= w32 << 8;
+		w32 |= w32 << 16;
+	}

-	if (n >= 4) {
+	if (likely(n >= 4)) {
 		/* Align the destination to a word boundary */
 		/* This is done in an endian independant manner */
 		switch ((unsigned) src & 3) {
...
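The w32 construction above widens the 8-bit fill value into a repeating 32-bit pattern so the aligned loop can store four bytes per iteration; a small standalone sketch of just that step (illustrative, not from the tree):

	#include <stdint.h>
	#include <stdio.h>

	/* Build the repeating word the same way the memset() above does:
	 * 0xAB -> 0xABABABAB.  The zero check mirrors the unlikely(c) guard,
	 * since w32 is already initialised to 0 for the c == 0 case. */
	static uint32_t repeat_byte(uint8_t c)
	{
		uint32_t w32 = 0;

		if (c) {
			w32 = c;		/* 0x000000AB */
			w32 |= w32 << 8;	/* 0x0000ABAB */
			w32 |= w32 << 16;	/* 0xABABABAB */
		}
		return w32;
	}

	int main(void)
	{
		printf("0x%08x\n", repeat_byte(0xAB));	/* prints 0xabababab */
		return 0;
	}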
/*
* Copyright (C) 2006 Atmark Techno, Inc.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/string.h>
#include <asm/uaccess.h>
#include <asm/bug.h>
long strnlen_user(const char __user *src, long count)
{
return strlen(src) + 1;
}
#define __do_strncpy_from_user(dst, src, count, res) \
do { \
char *tmp; \
strncpy(dst, src, count); \
for (tmp = dst; *tmp && count > 0; tmp++, count--) \
; \
res = (tmp - dst); \
} while (0)
long __strncpy_from_user(char *dst, const char __user *src, long count)
{
long res;
__do_strncpy_from_user(dst, src, count, res);
return res;
}
long strncpy_from_user(char *dst, const char __user *src, long count)
{
long res = -EFAULT;
if (access_ok(VERIFY_READ, src, 1))
__do_strncpy_from_user(dst, src, count, res);
return res;
}
unsigned long __copy_tofrom_user(void __user *to,
const void __user *from, unsigned long size)
{
memcpy(to, from, size);
return 0;
}
...@@ -22,6 +22,7 @@ ...@@ -22,6 +22,7 @@
.text .text
.globl __strncpy_user; .globl __strncpy_user;
.type __strncpy_user, @function
.align 4; .align 4;
__strncpy_user: __strncpy_user:
...@@ -50,7 +51,7 @@ __strncpy_user: ...@@ -50,7 +51,7 @@ __strncpy_user:
3: 3:
rtsd r15,8 rtsd r15,8
nop nop
.size __strncpy_user, . - __strncpy_user
.section .fixup, "ax" .section .fixup, "ax"
.align 2 .align 2
...@@ -72,6 +73,7 @@ __strncpy_user: ...@@ -72,6 +73,7 @@ __strncpy_user:
.text .text
.globl __strnlen_user; .globl __strnlen_user;
.type __strnlen_user, @function
.align 4; .align 4;
__strnlen_user: __strnlen_user:
addik r3,r6,0 addik r3,r6,0
...@@ -90,7 +92,7 @@ __strnlen_user: ...@@ -90,7 +92,7 @@ __strnlen_user:
3: 3:
rtsd r15,8 rtsd r15,8
nop nop
.size __strnlen_user, . - __strnlen_user
.section .fixup,"ax" .section .fixup,"ax"
4: 4:
...@@ -108,6 +110,7 @@ __strnlen_user: ...@@ -108,6 +110,7 @@ __strnlen_user:
*/ */
.text .text
.globl __copy_tofrom_user; .globl __copy_tofrom_user;
.type __copy_tofrom_user, @function
.align 4; .align 4;
__copy_tofrom_user: __copy_tofrom_user:
/* /*
@@ -116,20 +119,34 @@ __copy_tofrom_user:
 	/*
 	 * r7, r3 - count
 	 * r4 - tempval
 	 */
-	addik	r3,r7,0
-	beqi	r3,3f
-1:
-	lbu	r4,r6,r0
-	addik	r6,r6,1
-2:
-	sb	r4,r5,r0
-	addik	r3,r3,-1
-	bneid	r3,1b
-	addik	r5,r5,1 /* delay slot */
+	beqid	r7, 3f /* zero size is not likely */
+	andi	r3, r7, 0x3 /* filter add count */
+	bneid	r3, 4f /* if is odd value then byte copying */
+	or	r3, r5, r6 /* find if is any to/from unaligned */
+	andi	r3, r3, 0x3 /* mask unaligned */
+	bneid	r3, 1f /* it is unaligned -> then jump */
+	or	r3, r0, r0
+
+/* at least one 4 byte copy */
+5:	lw	r4, r6, r3
+6:	sw	r4, r5, r3
+	addik	r7, r7, -4
+	bneid	r7, 5b
+	addik	r3, r3, 4
+	addik	r3, r7, 0
+	rtsd	r15, 8
+	nop
+4:	or	r3, r0, r0
+1:	lbu	r4,r6,r3
+2:	sb	r4,r5,r3
+	addik	r7,r7,-1
+	bneid	r7,1b
+	addik	r3,r3,1 /* delay slot */
 3:
+	addik	r3,r7,0
 	rtsd	r15,8
 	nop
+	.size __copy_tofrom_user, . - __copy_tofrom_user

 	.section	__ex_table,"a"
-	.word	1b,3b,2b,3b
+	.word	1b,3b,2b,3b,5b,3b,6b,3b
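The reworked __copy_tofrom_user above takes the word-copy fast path only when the count is a non-zero multiple of four and both pointers are word-aligned, otherwise it falls back to the old byte loop. A plain-C sketch of that decision (illustrative only; the real routine additionally records exception-table fixups for every user load/store):

	#include <stddef.h>
	#include <stdint.h>

	/* Returns the number of bytes left uncopied (0 on success), matching
	 * the assembly's convention; fault handling is omitted here. */
	static size_t copy_tofrom_user_sketch(void *to, const void *from, size_t n)
	{
		if (n && !(n & 3) && !(((uintptr_t)to | (uintptr_t)from) & 3)) {
			uint32_t *d = to;		/* word copy: 4 bytes/step */
			const uint32_t *s = from;

			for (; n; n -= 4)
				*d++ = *s++;
		} else {
			uint8_t *d = to;		/* byte copy fallback */
			const uint8_t *s = from;

			for (; n; n--)
				*d++ = *s++;
		}
		return n;				/* 0: everything copied */
	}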
...@@ -106,7 +106,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address, ...@@ -106,7 +106,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
regs->esr = error_code; regs->esr = error_code;
/* On a kernel SLB miss we can only check for a valid exception entry */ /* On a kernel SLB miss we can only check for a valid exception entry */
if (kernel_mode(regs) && (address >= TASK_SIZE)) { if (unlikely(kernel_mode(regs) && (address >= TASK_SIZE))) {
printk(KERN_WARNING "kernel task_size exceed"); printk(KERN_WARNING "kernel task_size exceed");
_exception(SIGSEGV, regs, code, address); _exception(SIGSEGV, regs, code, address);
} }
...@@ -122,7 +122,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address, ...@@ -122,7 +122,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
} }
#endif /* CONFIG_KGDB */ #endif /* CONFIG_KGDB */
if (in_atomic() || !mm) { if (unlikely(in_atomic() || !mm)) {
if (kernel_mode(regs)) if (kernel_mode(regs))
goto bad_area_nosemaphore; goto bad_area_nosemaphore;
...@@ -150,7 +150,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address, ...@@ -150,7 +150,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
* source. If this is invalid we can skip the address space check, * source. If this is invalid we can skip the address space check,
* thus avoiding the deadlock. * thus avoiding the deadlock.
*/ */
if (!down_read_trylock(&mm->mmap_sem)) { if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
if (kernel_mode(regs) && !search_exception_tables(regs->pc)) if (kernel_mode(regs) && !search_exception_tables(regs->pc))
goto bad_area_nosemaphore; goto bad_area_nosemaphore;
...@@ -158,16 +158,16 @@ void do_page_fault(struct pt_regs *regs, unsigned long address, ...@@ -158,16 +158,16 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
} }
vma = find_vma(mm, address); vma = find_vma(mm, address);
if (!vma) if (unlikely(!vma))
goto bad_area; goto bad_area;
if (vma->vm_start <= address) if (vma->vm_start <= address)
goto good_area; goto good_area;
if (!(vma->vm_flags & VM_GROWSDOWN)) if (unlikely(!(vma->vm_flags & VM_GROWSDOWN)))
goto bad_area; goto bad_area;
if (!is_write) if (unlikely(!is_write))
goto bad_area; goto bad_area;
/* /*
...@@ -179,7 +179,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address, ...@@ -179,7 +179,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
* before setting the user r1. Thus we allow the stack to * before setting the user r1. Thus we allow the stack to
* expand to 1MB without further checks. * expand to 1MB without further checks.
*/ */
if (address + 0x100000 < vma->vm_end) { if (unlikely(address + 0x100000 < vma->vm_end)) {
/* get user regs even if this fault is in kernel mode */ /* get user regs even if this fault is in kernel mode */
struct pt_regs *uregs = current->thread.regs; struct pt_regs *uregs = current->thread.regs;
...@@ -209,15 +209,15 @@ void do_page_fault(struct pt_regs *regs, unsigned long address, ...@@ -209,15 +209,15 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
code = SEGV_ACCERR; code = SEGV_ACCERR;
/* a write */ /* a write */
if (is_write) { if (unlikely(is_write)) {
if (!(vma->vm_flags & VM_WRITE)) if (unlikely(!(vma->vm_flags & VM_WRITE)))
goto bad_area; goto bad_area;
/* a read */ /* a read */
} else { } else {
/* protection fault */ /* protection fault */
if (error_code & 0x08000000) if (unlikely(error_code & 0x08000000))
goto bad_area; goto bad_area;
if (!(vma->vm_flags & (VM_READ | VM_EXEC))) if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC))))
goto bad_area; goto bad_area;
} }
...@@ -235,7 +235,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address, ...@@ -235,7 +235,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
goto do_sigbus; goto do_sigbus;
BUG(); BUG();
} }
if (fault & VM_FAULT_MAJOR) if (unlikely(fault & VM_FAULT_MAJOR))
current->maj_flt++; current->maj_flt++;
else else
current->min_flt++; current->min_flt++;
......
@@ -165,7 +165,6 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
 	for (addr = begin; addr < end; addr += PAGE_SIZE) {
 		ClearPageReserved(virt_to_page(addr));
 		init_page_count(virt_to_page(addr));
-		memset((void *)addr, 0xcc, PAGE_SIZE);
 		free_page(addr);
 		totalram_pages++;
 	}
@@ -208,14 +207,6 @@ void __init mem_init(void)
 }

 #ifndef CONFIG_MMU
-/* Check against bounds of physical memory */
-int ___range_ok(unsigned long addr, unsigned long size)
-{
-	return ((addr < memory_start) ||
-		((addr + size) > memory_end));
-}
-
-EXPORT_SYMBOL(___range_ok);
-
 int page_is_ram(unsigned long pfn)
 {
 	return __range_ok(pfn, 0);
...
@@ -154,7 +154,7 @@ int map_page(unsigned long va, phys_addr_t pa, int flags)
 		err = 0;
 		set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT,
 				__pgprot(flags)));
-		if (mem_init_done)
+		if (unlikely(mem_init_done))
 			flush_HPTE(0, va, pmd_val(*pd));
 		/* flush_HPTE(0, va, pg); */
 	}
...