Commit 02a99ed6 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.monstr.eu/linux-2.6-microblaze

* 'for-linus' of git://git.monstr.eu/linux-2.6-microblaze: (55 commits)
  microblaze: Don't use access_ok for unaligned
  microblaze: remove unused flat_stack_align() definition
  microblaze: Fix problem with early_printk in startup
  microblaze_mmu_v2: Makefiles
  microblaze_mmu_v2: Kconfig update
  microblaze_mmu_v2: stat.h MMU update
  microblaze_mmu_v2: Elf update
  microblaze_mmu_v2: Update dma.h for MMU
  microblaze_mmu_v2: Update cacheflush.h
  microblaze_mmu_v2: Update signal returning address
  microblaze_mmu_v2: Traps MMU update
  microblaze_mmu_v2: Enable fork syscall for MMU and add fork as vfork for noMMU
  microblaze_mmu_v2: Update linker script for MMU
  microblaze_mmu_v2: Add MMU related exceptions handling
  microblaze_mmu_v2: uaccess MMU update
  microblaze_mmu_v2: Update exception handling - MMU exception
  microblaze_mmu_v2: entry.S, entry.h
  microblaze_mmu_v2: Add CURRENT_TASK for entry.S
  microblaze_mmu_v2: MMU asm offset update
  microblaze_mmu_v2: Update tlb.h and tlbflush.h
  ...
parents 2b10dc45 3447ef29
......@@ -6,6 +6,7 @@ mainmenu "Linux/Microblaze Kernel Configuration"
config MICROBLAZE
def_bool y
select HAVE_LMB
select ARCH_WANT_OPTIONAL_GPIOLIB
config SWAP
def_bool n
......@@ -49,13 +50,14 @@ config GENERIC_CLOCKEVENTS
config GENERIC_HARDIRQS_NO__DO_IRQ
def_bool y
config GENERIC_GPIO
def_bool y
config PCI
depends on !MMU
def_bool n
config NO_DMA
depends on !MMU
def_bool n
def_bool y
source "init/Kconfig"
......@@ -72,7 +74,8 @@ source "kernel/Kconfig.preempt"
source "kernel/Kconfig.hz"
config MMU
def_bool n
bool "MMU support"
default n
config NO_MMU
bool
......@@ -105,9 +108,6 @@ config CMDLINE_FORCE
config OF
def_bool y
config OF_DEVICE
def_bool y
config PROC_DEVICETREE
bool "Support for device tree in /proc"
depends on PROC_FS
......@@ -118,6 +118,113 @@ config PROC_DEVICETREE
endmenu
menu "Advanced setup"
config ADVANCED_OPTIONS
bool "Prompt for advanced kernel configuration options"
depends on MMU
help
This option will enable prompting for a variety of advanced kernel
configuration options. These options can cause the kernel to not
work if they are set incorrectly, but can be used to optimize certain
aspects of kernel memory management.
Unless you know what you are doing, say N here.
comment "Default settings for advanced configuration options are used"
depends on !ADVANCED_OPTIONS
config HIGHMEM_START_BOOL
bool "Set high memory pool address"
depends on ADVANCED_OPTIONS && HIGHMEM
help
This option allows you to set the base address of the kernel virtual
area used to map high memory pages. This can be useful in
optimizing the layout of kernel virtual memory.
Say N here unless you know what you are doing.
config HIGHMEM_START
hex "Virtual start address of high memory pool" if HIGHMEM_START_BOOL
depends on MMU
default "0xfe000000"
config LOWMEM_SIZE_BOOL
bool "Set maximum low memory"
depends on ADVANCED_OPTIONS
help
This option allows you to set the maximum amount of memory which
will be used as "low memory", that is, memory which the kernel can
access directly, without having to set up a kernel virtual mapping.
This can be useful in optimizing the layout of kernel virtual
memory.
Say N here unless you know what you are doing.
config LOWMEM_SIZE
hex "Maximum low memory size (in bytes)" if LOWMEM_SIZE_BOOL
depends on MMU
default "0x30000000"
config KERNEL_START_BOOL
bool "Set custom kernel base address"
depends on ADVANCED_OPTIONS
help
This option allows you to set the kernel virtual address at which
the kernel will map low memory (the kernel image will be linked at
this address). This can be useful in optimizing the virtual memory
layout of the system.
Say N here unless you know what you are doing.
config KERNEL_START
hex "Virtual address of kernel base" if KERNEL_START_BOOL
default "0xc0000000" if MMU
default KERNEL_BASE_ADDR if !MMU
config TASK_SIZE_BOOL
bool "Set custom user task size"
depends on ADVANCED_OPTIONS
help
This option allows you to set the amount of virtual address space
allocated to user tasks. This can be useful in optimizing the
virtual memory layout of the system.
Say N here unless you know what you are doing.
config TASK_SIZE
hex "Size of user task space" if TASK_SIZE_BOOL
depends on MMU
default "0x80000000"
config CONSISTENT_START_BOOL
bool "Set custom consistent memory pool address"
depends on ADVANCED_OPTIONS && NOT_COHERENT_CACHE
help
This option allows you to set the base virtual address
of the consistent memory pool. This pool of virtual
memory is used to make consistent memory allocations.
config CONSISTENT_START
hex "Base virtual address of consistent memory pool" if CONSISTENT_START_BOOL
depends on MMU
default "0xff100000" if NOT_COHERENT_CACHE
config CONSISTENT_SIZE_BOOL
bool "Set custom consistent memory pool size"
depends on ADVANCED_OPTIONS && NOT_COHERENT_CACHE
help
This option allows you to set the size of the
consistent memory pool. This pool of virtual memory
is used to make consistent memory allocations.
config CONSISTENT_SIZE
hex "Size of consistent memory pool" if CONSISTENT_SIZE_BOOL
depends on MMU
default "0x00200000" if NOT_COHERENT_CACHE
endmenu
source "mm/Kconfig"
menu "Exectuable file formats"
......
ifeq ($(CONFIG_MMU),y)
UTS_SYSNAME = -DUTS_SYSNAME=\"Linux\"
else
UTS_SYSNAME = -DUTS_SYSNAME=\"uClinux\"
endif
# What CPU version are we building for, and crack it open
# as major.minor.rev
......@@ -36,6 +40,8 @@ CPUFLAGS-1 += $(call cc-option,-mcpu=v$(CPU_VER))
# r31 holds current when in kernel mode
CFLAGS_KERNEL += -ffixed-r31 $(CPUFLAGS-1) $(CPUFLAGS-2)
LDFLAGS :=
LDFLAGS_vmlinux :=
LDFLAGS_BLOB := --format binary --oformat elf32-microblaze
LIBGCC := $(shell $(CC) $(CFLAGS_KERNEL) -print-libgcc-file-name)
......
......@@ -7,6 +7,8 @@ targets := linux.bin linux.bin.gz
OBJCOPYFLAGS_linux.bin := -O binary
$(obj)/linux.bin: vmlinux FORCE
[ -n $(CONFIG_INITRAMFS_SOURCE) ] && [ ! -e $(CONFIG_INITRAMFS_SOURCE) ] && \
touch $(CONFIG_INITRAMFS_SOURCE) || echo "No CPIO image"
$(call if_changed,objcopy)
@echo 'Kernel: $@ is ready' ' (#'`cat .version`')'
......
include include/asm-generic/Kbuild.asm
header-y += auxvec.h
header-y += errno.h
header-y += fcntl.h
header-y += ioctl.h
header-y += ioctls.h
header-y += ipcbuf.h
header-y += linkage.h
header-y += msgbuf.h
header-y += poll.h
header-y += resource.h
header-y += sembuf.h
header-y += shmbuf.h
header-y += sigcontext.h
header-y += siginfo.h
header-y += socket.h
header-y += sockios.h
header-y += statfs.h
header-y += stat.h
header-y += termbits.h
header-y += ucontext.h
unifdef-y += cputable.h
unifdef-y += elf.h
unifdef-y += termios.h
header-y += elf.h
/*
* Copyright (C) 2007 PetaLogix
* Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
* Copyright (C) 2007-2009 PetaLogix
* Copyright (C) 2007 John Williams <john.williams@petalogix.com>
* based on v850 version which was
* Copyright (C) 2001,02,03 NEC Electronics Corporation
......@@ -43,6 +44,23 @@
#define flush_icache_range(start, len) __invalidate_icache_range(start, len)
#define flush_icache_page(vma, pg) do { } while (0)
#ifndef CONFIG_MMU
# define flush_icache_user_range(start, len) do { } while (0)
#else
# define flush_icache_user_range(vma, pg, adr, len) __invalidate_icache_all()
# define flush_page_to_ram(page) do { } while (0)
# define flush_icache() __invalidate_icache_all()
# define flush_cache_sigtramp(vaddr) \
__invalidate_icache_range(vaddr, vaddr + 8)
# define flush_dcache_mmap_lock(mapping) do { } while (0)
# define flush_dcache_mmap_unlock(mapping) do { } while (0)
# define flush_cache_dup_mm(mm) do { } while (0)
#endif
#define flush_cache_vmap(start, end) do { } while (0)
#define flush_cache_vunmap(start, end) do { } while (0)
......
......@@ -51,7 +51,8 @@ extern __wsum csum_partial(const void *buff, int len, __wsum sum);
* here even more important to align src and dst on a 32-bit (or even
* better 64-bit) boundary
*/
extern __wsum csum_partial_copy(const char *src, char *dst, int len, int sum);
extern __wsum csum_partial_copy(const void *src, void *dst, int len,
__wsum sum);
/*
* the same as csum_partial_copy, but copies from user space.
......@@ -59,8 +60,8 @@ extern __wsum csum_partial_copy(const char *src, char *dst, int len, int sum);
* here even more important to align src and dst on a 32-bit (or even
* better 64-bit) boundary
*/
extern __wsum csum_partial_copy_from_user(const char *src, char *dst,
int len, int sum, int *csum_err);
extern __wsum csum_partial_copy_from_user(const void __user *src, void *dst,
int len, __wsum sum, int *csum_err);
#define csum_partial_copy_nocheck(src, dst, len, sum) \
csum_partial_copy((src), (dst), (len), (sum))
......@@ -75,11 +76,12 @@ extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl);
/*
* Fold a partial checksum
*/
static inline __sum16 csum_fold(unsigned int sum)
static inline __sum16 csum_fold(__wsum csum)
{
u32 sum = (__force u32)csum;
sum = (sum & 0xffff) + (sum >> 16);
sum = (sum & 0xffff) + (sum >> 16);
return ~sum;
return (__force __sum16)~sum;
}
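/*
 * Illustrative only (not part of this header): folding the hypothetical
 * 32-bit partial sum 0x0001fffe with the two steps above gives
 *
 *	0x0001fffe -> 0xfffe + 0x0001 = 0xffff	(first fold)
 *	0xffff     -> 0xffff + 0x0000 = 0xffff	(second fold, no carry)
 *
 * and ~0xffff == 0x0000, the result expected when checksumming a packet
 * whose checksum field is already valid.
 */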
static inline __sum16
......@@ -93,6 +95,6 @@ csum_tcpudp_magic(__be32 saddr, __be32 daddr, unsigned short len,
* this routine is used for miscellaneous IP-like checksums, mainly
* in icmp.c
*/
extern __sum16 ip_compute_csum(const unsigned char *buff, int len);
extern __sum16 ip_compute_csum(const void *buff, int len);
#endif /* _ASM_MICROBLAZE_CHECKSUM_H */
/*
* Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
* Copyright (C) 2008-2009 PetaLogix
* Copyright (C) 2006 Atmark Techno, Inc.
*
* This file is subject to the terms and conditions of the GNU General Public
......@@ -9,6 +11,12 @@
#ifndef _ASM_MICROBLAZE_CURRENT_H
#define _ASM_MICROBLAZE_CURRENT_H
/*
* Register used to hold the current task pointer while in the kernel.
* Any `call clobbered' register without a special meaning should be OK,
* but check asm/microblaze/kernel/entry.S to be sure.
*/
#define CURRENT_TASK r31
# ifndef __ASSEMBLY__
/*
* Dedicate r31 to keeping the current task pointer
......
/*
* Copyright (C) 2006 Atmark Techno, Inc.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#ifndef _ASM_MICROBLAZE_DMA_MAPPING_H
#define _ASM_MICROBLAZE_DMA_MAPPING_H
#include <asm/cacheflush.h>
#include <linux/io.h>
#include <linux/bug.h>
struct scatterlist;
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
/* FIXME */
static inline int
dma_supported(struct device *dev, u64 mask)
{
return 1;
}
static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction direction)
{
BUG();
return 0;
}
static inline void
dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
enum dma_data_direction direction)
{
BUG();
}
static inline int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
enum dma_data_direction direction)
{
BUG();
return 0;
}
static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
enum dma_data_direction direction)
{
BUG();
}
static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
enum dma_data_direction direction)
{
BUG();
}
static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
size_t size, enum dma_data_direction direction)
{
BUG();
}
static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
enum dma_data_direction direction)
{
BUG();
}
static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
enum dma_data_direction direction)
{
BUG();
}
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
return 0;
}
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, int flag)
{
return NULL; /* consistent_alloc(flag, size, dma_handle); */
}
static inline void dma_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle)
{
BUG();
}
static inline dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
enum dma_data_direction direction)
{
BUG_ON(direction == DMA_NONE);
return virt_to_bus(ptr);
}
static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
size_t size,
enum dma_data_direction direction)
{
switch (direction) {
case DMA_FROM_DEVICE:
flush_dcache_range((unsigned)dma_addr,
(unsigned)dma_addr + size);
/* Fall through */
case DMA_TO_DEVICE:
break;
default:
BUG();
}
}
#endif /* _ASM_MICROBLAZE_DMA_MAPPING_H */
#include <asm-generic/dma-mapping-broken.h>
......@@ -9,8 +9,13 @@
#ifndef _ASM_MICROBLAZE_DMA_H
#define _ASM_MICROBLAZE_DMA_H
#ifndef CONFIG_MMU
/* We don't have a DMA address limit; define it as zero to be
 * unlimited. */
#define MAX_DMA_ADDRESS (0)
#else
/* Virtual address corresponding to last available physical memory address. */
#define MAX_DMA_ADDRESS (CONFIG_KERNEL_START + memory_size - 1)
#endif
#endif /* _ASM_MICROBLAZE_DMA_H */
/*
* Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
* Copyright (C) 2008-2009 PetaLogix
* Copyright (C) 2006 Atmark Techno, Inc.
*
* This file is subject to the terms and conditions of the GNU General Public
......@@ -27,4 +29,95 @@
*/
#define ELF_CLASS ELFCLASS32
#ifndef __uClinux__
/*
* ELF register definitions..
*/
#include <asm/ptrace.h>
#include <asm/byteorder.h>
#ifndef ELF_GREG_T
#define ELF_GREG_T
typedef unsigned long elf_greg_t;
#endif
#ifndef ELF_NGREG
#define ELF_NGREG (sizeof(struct pt_regs) / sizeof(elf_greg_t))
#endif
#ifndef ELF_GREGSET_T
#define ELF_GREGSET_T
typedef elf_greg_t elf_gregset_t[ELF_NGREG];
#endif
#ifndef ELF_FPREGSET_T
#define ELF_FPREGSET_T
/* TBD */
#define ELF_NFPREG 33 /* includes fsr */
typedef unsigned long elf_fpreg_t;
typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
/* typedef struct user_fpu_struct elf_fpregset_t; */
#endif
/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
* use of this is to invoke "./ld.so someprog" to test out a new version of
* the loader. We need to make sure that it is out of the way of the program
* that it will "exec", and that there is sufficient room for the brk.
*/
#define ELF_ET_DYN_BASE (0x08000000)
#ifdef __LITTLE_ENDIAN__
#define ELF_DATA ELFDATA2LSB
#else
#define ELF_DATA ELFDATA2MSB
#endif
#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096
#define ELF_CORE_COPY_REGS(_dest, _regs) \
memcpy((char *) &_dest, (char *) _regs, \
sizeof(struct pt_regs));
/* This yields a mask that user programs can use to figure out what
* instruction set this CPU supports. This could be done in user space,
* but it's not easy, and we've already done it here.
*/
#define ELF_HWCAP (0)
/* This yields a string that ld.so will use to load implementation
* specific libraries for optimization. This is more specific in
* intent than poking at uname or /proc/cpuinfo.
* For the moment, we have only optimizations for the Intel generations,
* but that could change...
*/
#define ELF_PLATFORM (NULL)
/* Added _f parameter. Is this definition correct: TBD */
#define ELF_PLAT_INIT(_r, _f) \
do { \
_r->r1 = _r->r1 = _r->r2 = _r->r3 = \
_r->r4 = _r->r5 = _r->r6 = _r->r7 = \
_r->r8 = _r->r9 = _r->r10 = _r->r11 = \
_r->r12 = _r->r13 = _r->r14 = _r->r15 = \
_r->r16 = _r->r17 = _r->r18 = _r->r19 = \
_r->r20 = _r->r21 = _r->r22 = _r->r23 = \
_r->r24 = _r->r25 = _r->r26 = _r->r27 = \
_r->r28 = _r->r29 = _r->r30 = _r->r31 = \
0; \
} while (0)
#ifdef __KERNEL__
#define SET_PERSONALITY(ex) set_personality(PER_LINUX_32BIT)
#endif
#endif /* __uClinux__ */
#endif /* _ASM_MICROBLAZE_ELF_H */
/*
* Definitions used by low-level trap handlers
*
* Copyright (C) 2008 Michal Simek
* Copyright (C) 2007 - 2008 PetaLogix
* Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
* Copyright (C) 2007-2009 PetaLogix
* Copyright (C) 2007 John Williams <john.williams@petalogix.com>
*
* This file is subject to the terms and conditions of the GNU General
......@@ -31,7 +31,40 @@ DECLARE_PER_CPU(unsigned int, R11_SAVE); /* Temp variable for entry */
DECLARE_PER_CPU(unsigned int, CURRENT_SAVE); /* Saved current pointer */
# endif /* __ASSEMBLY__ */
#ifndef CONFIG_MMU
/* noMMU has no space for args */
# define STATE_SAVE_ARG_SPACE (0)
#else /* CONFIG_MMU */
/* If true, system calls save and restore all registers (except result
* registers, of course). If false, then `call clobbered' registers
* will not be preserved, on the theory that system calls are basically
* function calls anyway, and the caller should be able to deal with it.
* This is a security risk, of course, as `internal' values may leak out
* after a system call, but that certainly doesn't matter very much for
* a processor with no MMU protection! For a protected-mode kernel, it
* would be faster to just zero those registers before returning.
*
* I cannot rely on the glibc implementation. If you turn it off, make
* sure that r11/r12 are saved in user-space. --KAA
*
* These are special variables used by the kernel trap/interrupt code
* to save registers in, at a time when there are no spare registers we
* can use to do so, and we can't depend on the value of the stack
* pointer. This means that they must be within a signed 16-bit
* displacement of 0x00000000.
*/
/* A `state save frame' is a struct pt_regs preceded by some extra space
* suitable for a function call stack frame. */
/* Amount of room on the stack reserved for arguments and to satisfy the
* C calling conventions, in addition to the space used by the struct
* pt_regs that actually holds saved values. */
#define STATE_SAVE_ARG_SPACE (6*4) /* Up to six arguments */
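/*
 * Sketch of the resulting layout (illustrative, not from this header):
 * after the kernel advances the stack down by
 * STATE_SAVE_ARG_SPACE + sizeof(struct pt_regs), the frame looks like
 *
 *	r1 ->	[ 6*4 bytes of argument/linkage space   ]
 *		[ struct pt_regs (saved register state) ]
 *		[ caller's stack ...                    ]
 */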
#endif /* CONFIG_MMU */
#endif /* _ASM_MICROBLAZE_ENTRY_H */
/*
* Preliminary support for HW exception handing for Microblaze
*
* Copyright (C) 2008 Michal Simek
* Copyright (C) 2008 PetaLogix
* Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
* Copyright (C) 2008-2009 PetaLogix
* Copyright (C) 2005 John Williams <jwilliams@itee.uq.edu.au>
*
* This file is subject to the terms and conditions of the GNU General
......@@ -64,21 +64,13 @@ asmlinkage void full_exception(struct pt_regs *regs, unsigned int type,
void die(const char *str, struct pt_regs *fp, long err);
void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr);
#if defined(CONFIG_XMON)
extern void xmon(struct pt_regs *regs);
extern int xmon_bpt(struct pt_regs *regs);
extern int xmon_sstep(struct pt_regs *regs);
extern int xmon_iabr_match(struct pt_regs *regs);
extern int xmon_dabr_match(struct pt_regs *regs);
extern void (*xmon_fault_handler)(struct pt_regs *regs);
#ifdef CONFIG_MMU
void __bug(const char *file, int line, void *data);
int bad_trap(int trap_num, struct pt_regs *regs);
int debug_trap(struct pt_regs *regs);
#endif /* CONFIG_MMU */
void (*debugger)(struct pt_regs *regs) = xmon;
int (*debugger_bpt)(struct pt_regs *regs) = xmon_bpt;
int (*debugger_sstep)(struct pt_regs *regs) = xmon_sstep;
int (*debugger_iabr_match)(struct pt_regs *regs) = xmon_iabr_match;
int (*debugger_dabr_match)(struct pt_regs *regs) = xmon_dabr_match;
void (*debugger_fault_handler)(struct pt_regs *regs);
#elif defined(CONFIG_KGDB)
#if defined(CONFIG_KGDB)
void (*debugger)(struct pt_regs *regs);
int (*debugger_bpt)(struct pt_regs *regs);
int (*debugger_sstep)(struct pt_regs *regs);
......
......@@ -13,7 +13,6 @@
#include <asm/unaligned.h>
#define flat_stack_align(sp) /* nothing needed */
#define flat_argvp_envp_on_stack() 0
#define flat_old_ram_flag(flags) (flags)
#define flat_reloc_valid(reloc, size) ((reloc) <= (size))
......
......@@ -11,8 +11,8 @@
* (at your option) any later version.
*/
#ifndef __ASM_POWERPC_GPIO_H
#define __ASM_POWERPC_GPIO_H
#ifndef _ASM_MICROBLAZE_GPIO_H
#define _ASM_MICROBLAZE_GPIO_H
#include <linux/errno.h>
#include <asm-generic/gpio.h>
......@@ -53,4 +53,4 @@ static inline int irq_to_gpio(unsigned int irq)
#endif /* CONFIG_GPIOLIB */
#endif /* __ASM_POWERPC_GPIO_H */
#endif /* _ASM_MICROBLAZE_GPIO_H */
/*
* Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
* Copyright (C) 2007-2009 PetaLogix
* Copyright (C) 2006 Atmark Techno, Inc.
*
* This file is subject to the terms and conditions of the GNU General Public
......@@ -12,6 +14,9 @@
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/mm.h> /* Get struct page {...} */
#define IO_SPACE_LIMIT (0xFFFFFFFF)
......@@ -112,6 +117,30 @@ static inline void writel(unsigned int v, volatile void __iomem *addr)
#define memcpy_fromio(a, b, c) memcpy((a), (void *)(b), (c))
#define memcpy_toio(a, b, c) memcpy((void *)(a), (b), (c))
#ifdef CONFIG_MMU
#define mm_ptov(addr) ((void *)__phys_to_virt(addr))
#define mm_vtop(addr) ((unsigned long)__virt_to_phys(addr))
#define phys_to_virt(addr) ((void *)__phys_to_virt(addr))
#define virt_to_phys(addr) ((unsigned long)__virt_to_phys(addr))
#define virt_to_bus(addr) ((unsigned long)__virt_to_phys(addr))
#define __page_address(page) \
(PAGE_OFFSET + (((page) - mem_map) << PAGE_SHIFT))
#define page_to_phys(page) virt_to_phys((void *)__page_address(page))
#define page_to_bus(page) (page_to_phys(page))
#define bus_to_virt(addr) (phys_to_virt(addr))
extern void iounmap(void *addr);
/*extern void *__ioremap(phys_addr_t address, unsigned long size,
unsigned long flags);*/
extern void __iomem *ioremap(phys_addr_t address, unsigned long size);
#define ioremap_writethrough(addr, size) ioremap((addr), (size))
#define ioremap_nocache(addr, size) ioremap((addr), (size))
#define ioremap_fullcache(addr, size) ioremap((addr), (size))
#else /* CONFIG_MMU */
/**
* virt_to_phys - map virtual addresses to physical
* @address: address to remap
......@@ -160,6 +189,8 @@ static inline void __iomem *__ioremap(phys_addr_t address, unsigned long size,
#define iounmap(addr) ((void)0)
#define ioremap_nocache(physaddr, size) ioremap(physaddr, size)
#endif /* CONFIG_MMU */
/*
* Convert a physical pointer to a virtual kernel pointer for /dev/mem
* access
......
/*
* Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
* Copyright (C) 2008-2009 PetaLogix
* Copyright (C) 2006 Atmark Techno, Inc.
*
* This file is subject to the terms and conditions of the GNU General Public
......@@ -9,11 +11,109 @@
#ifndef _ASM_MICROBLAZE_MMU_H
#define _ASM_MICROBLAZE_MMU_H
#ifndef __ASSEMBLY__
# ifndef CONFIG_MMU
# ifndef __ASSEMBLY__
typedef struct {
struct vm_list_struct *vmlist;
unsigned long end_brk;
} mm_context_t;
#endif /* __ASSEMBLY__ */
# endif /* __ASSEMBLY__ */
# else /* CONFIG_MMU */
# ifdef __KERNEL__
# ifndef __ASSEMBLY__
/* Default "unsigned long" context */
typedef unsigned long mm_context_t;
/* Hardware Page Table Entry */
typedef struct _PTE {
unsigned long v:1; /* Entry is valid */
unsigned long vsid:24; /* Virtual segment identifier */
unsigned long h:1; /* Hash algorithm indicator */
unsigned long api:6; /* Abbreviated page index */
unsigned long rpn:20; /* Real (physical) page number */
unsigned long :3; /* Unused */
unsigned long r:1; /* Referenced */
unsigned long c:1; /* Changed */
unsigned long w:1; /* Write-thru cache mode */
unsigned long i:1; /* Cache inhibited */
unsigned long m:1; /* Memory coherence */
unsigned long g:1; /* Guarded */
unsigned long :1; /* Unused */
unsigned long pp:2; /* Page protection */
} PTE;
/* Values for PP (assumes Ks=0, Kp=1) */
# define PP_RWXX 0 /* Supervisor read/write, User none */
# define PP_RWRX 1 /* Supervisor read/write, User read */
# define PP_RWRW 2 /* Supervisor read/write, User read/write */
# define PP_RXRX 3 /* Supervisor read, User read */
/* Segment Register */
typedef struct _SEGREG {
unsigned long t:1; /* Normal or I/O type */
unsigned long ks:1; /* Supervisor 'key' (normally 0) */
unsigned long kp:1; /* User 'key' (normally 1) */
unsigned long n:1; /* No-execute */
unsigned long :4; /* Unused */
unsigned long vsid:24; /* Virtual Segment Identifier */
} SEGREG;
extern void _tlbie(unsigned long va); /* invalidate a TLB entry */
extern void _tlbia(void); /* invalidate all TLB entries */
# endif /* __ASSEMBLY__ */
/*
* The MicroBlaze processor has a TLB architecture identical to PPC-40x. The
* instruction and data sides share a unified, 64-entry, semi-associative
* TLB which is maintained totally under software control. In addition, the
* instruction side has a hardware-managed, 2-, 4- or 8-entry, fully-associative
* TLB which serves as a first level to the shared TLB. These two TLBs are
* known as the UTLB and ITLB, respectively.
*/
# define MICROBLAZE_TLB_SIZE 64
/*
* TLB entries are defined by a "high" tag portion and a "low" data
* portion. The data portion is 32-bits.
*
* TLB entries are managed entirely under software control by reading,
* writing, and searching using the MTS and MFS instructions.
*/
# define TLB_LO 1
# define TLB_HI 0
# define TLB_DATA TLB_LO
# define TLB_TAG TLB_HI
/* Tag portion */
# define TLB_EPN_MASK 0xFFFFFC00 /* Effective Page Number */
# define TLB_PAGESZ_MASK 0x00000380
# define TLB_PAGESZ(x) (((x) & 0x7) << 7)
# define PAGESZ_1K 0
# define PAGESZ_4K 1
# define PAGESZ_16K 2
# define PAGESZ_64K 3
# define PAGESZ_256K 4
# define PAGESZ_1M 5
# define PAGESZ_4M 6
# define PAGESZ_16M 7
# define TLB_VALID 0x00000040 /* Entry is valid */
/* Data portion */
# define TLB_RPN_MASK 0xFFFFFC00 /* Real Page Number */
# define TLB_PERM_MASK 0x00000300
# define TLB_EX 0x00000200 /* Instruction execution allowed */
# define TLB_WR 0x00000100 /* Writes permitted */
# define TLB_ZSEL_MASK 0x000000F0
# define TLB_ZSEL(x) (((x) & 0xF) << 4)
# define TLB_ATTR_MASK 0x0000000F
# define TLB_W 0x00000008 /* Caching is write-through */
# define TLB_I 0x00000004 /* Caching is inhibited */
# define TLB_M 0x00000002 /* Memory is coherent */
# define TLB_G 0x00000001 /* Memory is guarded from prefetch */
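/*
 * Illustrative only: composing a hypothetical TLB entry from the macros
 * above, e.g. a valid, executable, writable 16MB mapping of physical
 * 0x10000000 at virtual 0xc0000000:
 *
 *	unsigned long hi = (0xc0000000 & TLB_EPN_MASK)
 *				| TLB_PAGESZ(PAGESZ_16M) | TLB_VALID;
 *	unsigned long lo = (0x10000000 & TLB_RPN_MASK)
 *				| TLB_EX | TLB_WR;
 *
 * hi and lo would then be written to the tag (TLB_HI) and data (TLB_LO)
 * words of a TLB slot with the MTS instruction, as described above.
 */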
# endif /* __KERNEL__ */
# endif /* CONFIG_MMU */
#endif /* _ASM_MICROBLAZE_MMU_H */
/*
* Copyright (C) 2006 Atmark Techno, Inc.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#ifndef _ASM_MICROBLAZE_MMU_CONTEXT_H
#define _ASM_MICROBLAZE_MMU_CONTEXT_H
# define init_new_context(tsk, mm) ({ 0; })
# define enter_lazy_tlb(mm, tsk) do {} while (0)
# define change_mm_context(old, ctx, _pml4) do {} while (0)
# define destroy_context(mm) do {} while (0)
# define deactivate_mm(tsk, mm) do {} while (0)
# define switch_mm(prev, next, tsk) do {} while (0)
# define activate_mm(prev, next) do {} while (0)
#endif /* _ASM_MICROBLAZE_MMU_CONTEXT_H */
#ifdef CONFIG_MMU
# include "mmu_context_mm.h"
#else
# include "mmu_context_no.h"
#endif
/*
* Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
* Copyright (C) 2008-2009 PetaLogix
* Copyright (C) 2006 Atmark Techno, Inc.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#ifndef _ASM_MICROBLAZE_MMU_CONTEXT_H
#define _ASM_MICROBLAZE_MMU_CONTEXT_H
#include <asm/atomic.h>
#include <asm/bitops.h>
#include <asm/mmu.h>
#include <asm-generic/mm_hooks.h>
# ifdef __KERNEL__
/*
* This function defines the mapping from contexts to VSIDs (virtual
* segment IDs). We use a skew on both the context and the high 4 bits
* of the 32-bit virtual address (the "effective segment ID") in order
* to spread out the entries in the MMU hash table.
*/
# define CTX_TO_VSID(ctx, va) (((ctx) * (897 * 16) + ((va) >> 28) * 0x111) \
& 0xffffff)
/*
MicroBlaze has 256 contexts, so we can just rotate through these
as a way of "switching" contexts. If the TID of the TLB is zero,
the PID/TID comparison is disabled, so we can use a TID of zero
to represent all kernel pages as shared among all contexts.
*/
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}
# define NO_CONTEXT 256
# define LAST_CONTEXT 255
# define FIRST_CONTEXT 1
/*
* Set the current MMU context.
* This is done by loading up the segment registers for the user part of the
* address space.
*
* Since the PGD is immediately available, it is much faster to simply
* pass this along as a second parameter, which is required for 8xx and
* can be used for debugging on all processors (if you happen to have
* an Abatron).
*/
extern void set_context(mm_context_t context, pgd_t *pgd);
/*
* Bitmap of contexts in use.
* The size of this bitmap is LAST_CONTEXT + 1 bits.
*/
extern unsigned long context_map[];
/*
* This caches the next context number that we expect to be free.
* Its use is an optimization only, we can't rely on this context
* number to be free, but it usually will be.
*/
extern mm_context_t next_mmu_context;
/*
* Since we don't have sufficient contexts to give one to every task
* that could be in the system, we need to be able to steal contexts.
* These variables support that.
*/
extern atomic_t nr_free_contexts;
extern struct mm_struct *context_mm[LAST_CONTEXT+1];
extern void steal_context(void);
/*
* Get a new mmu context for the address space described by `mm'.
*/
static inline void get_mmu_context(struct mm_struct *mm)
{
mm_context_t ctx;
if (mm->context != NO_CONTEXT)
return;
while (atomic_dec_if_positive(&nr_free_contexts) < 0)
steal_context();
ctx = next_mmu_context;
while (test_and_set_bit(ctx, context_map)) {
ctx = find_next_zero_bit(context_map, LAST_CONTEXT+1, ctx);
if (ctx > LAST_CONTEXT)
ctx = 0;
}
next_mmu_context = (ctx + 1) & LAST_CONTEXT;
mm->context = ctx;
context_mm[ctx] = mm;
}
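/*
 * Example walk-through (hypothetical state): if next_mmu_context is 42
 * and bit 42 is clear in context_map, the loop above claims context 42,
 * sets next_mmu_context to 43 and records context_mm[42] = mm. If bit
 * 42 were already set, find_next_zero_bit() would advance to the next
 * free context, wrapping past LAST_CONTEXT back to the start of the
 * bitmap.
 */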
/*
* Set up the context for a new address space.
*/
# define init_new_context(tsk, mm) (((mm)->context = NO_CONTEXT), 0)
/*
* We're finished using the context for an address space.
*/
static inline void destroy_context(struct mm_struct *mm)
{
if (mm->context != NO_CONTEXT) {
clear_bit(mm->context, context_map);
mm->context = NO_CONTEXT;
atomic_inc(&nr_free_contexts);
}
}
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk)
{
tsk->thread.pgdir = next->pgd;
get_mmu_context(next);
set_context(next->context, next->pgd);
}
/*
* After we have set current->mm to a new value, this activates
* the context for the new mm so we see the new mappings.
*/
static inline void activate_mm(struct mm_struct *active_mm,
struct mm_struct *mm)
{
current->thread.pgdir = mm->pgd;
get_mmu_context(mm);
set_context(mm->context, mm->pgd);
}
extern void mmu_context_init(void);
# endif /* __KERNEL__ */
#endif /* _ASM_MICROBLAZE_MMU_CONTEXT_H */
/*
* Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
* Copyright (C) 2008-2009 PetaLogix
* Copyright (C) 2006 Atmark Techno, Inc.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#ifndef _ASM_MICROBLAZE_MMU_CONTEXT_H
#define _ASM_MICROBLAZE_MMU_CONTEXT_H
# define init_new_context(tsk, mm) ({ 0; })
# define enter_lazy_tlb(mm, tsk) do {} while (0)
# define change_mm_context(old, ctx, _pml4) do {} while (0)
# define destroy_context(mm) do {} while (0)
# define deactivate_mm(tsk, mm) do {} while (0)
# define switch_mm(prev, next, tsk) do {} while (0)
# define activate_mm(prev, next) do {} while (0)
#endif /* _ASM_MICROBLAZE_MMU_CONTEXT_H */
/*
* Copyright (C) 2008 Michal Simek
* Copyright (C) 2008 PetaLogix
* VM ops
*
* Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
* Copyright (C) 2008-2009 PetaLogix
* Copyright (C) 2006 Atmark Techno, Inc.
* Changes for MMU support:
* Copyright (C) 2007 Xilinx, Inc. All rights reserved.
......@@ -15,14 +17,15 @@
#include <linux/pfn.h>
#include <asm/setup.h>
#include <linux/const.h>
#ifdef __KERNEL__
/* PAGE_SHIFT determines the page size */
#define PAGE_SHIFT (12)
#define PAGE_SIZE (1UL << PAGE_SHIFT)
#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE-1))
#ifdef __KERNEL__
#ifndef __ASSEMBLY__
#define PAGE_UP(addr) (((addr)+((PAGE_SIZE)-1))&(~((PAGE_SIZE)-1)))
......@@ -35,6 +38,7 @@
/* align addr on a size boundary - adjust address up if needed */
#define _ALIGN(addr, size) _ALIGN_UP(addr, size)
#ifndef CONFIG_MMU
/*
* PAGE_OFFSET -- the first address of the first page of memory. When not
* using MMU this corresponds to the first free page in physical memory (aligned
......@@ -43,15 +47,44 @@
extern unsigned int __page_offset;
#define PAGE_OFFSET __page_offset
#define copy_page(to, from) memcpy((to), (from), PAGE_SIZE)
#define get_user_page(vaddr) __get_free_page(GFP_KERNEL)
#define free_user_page(page, addr) free_page(addr)
#else /* CONFIG_MMU */
#define clear_page(pgaddr) memset((pgaddr), 0, PAGE_SIZE)
/*
* PAGE_OFFSET -- the first address of the first page of memory. With MMU
* it is set to the kernel start address (aligned on a page boundary).
*
* CONFIG_KERNEL_START is defined in arch/microblaze/config.in and used
* in arch/microblaze/Makefile.
*/
#define PAGE_OFFSET CONFIG_KERNEL_START
/*
* MAP_NR -- given an address, calculate the index of the page struct which
* points to the address's page.
*/
#define MAP_NR(addr) (((unsigned long)(addr) - PAGE_OFFSET) >> PAGE_SHIFT)
#define clear_user_page(pgaddr, vaddr, page) memset((pgaddr), 0, PAGE_SIZE)
#define copy_user_page(vto, vfrom, vaddr, topg) \
/*
* The basic type of a PTE - 32 bit physical addressing.
*/
typedef unsigned long pte_basic_t;
#define PTE_SHIFT (PAGE_SHIFT - 2) /* 1024 ptes per page */
#define PTE_FMT "%.8lx"
#endif /* CONFIG_MMU */
# ifndef CONFIG_MMU
# define copy_page(to, from) memcpy((to), (from), PAGE_SIZE)
# define get_user_page(vaddr) __get_free_page(GFP_KERNEL)
# define free_user_page(page, addr) free_page(addr)
# else /* CONFIG_MMU */
extern void copy_page(void *to, void *from);
# endif /* CONFIG_MMU */
# define clear_page(pgaddr) memset((pgaddr), 0, PAGE_SIZE)
# define clear_user_page(pgaddr, vaddr, page) memset((pgaddr), 0, PAGE_SIZE)
# define copy_user_page(vto, vfrom, vaddr, topg) \
memcpy((vto), (vfrom), PAGE_SIZE)
/*
......@@ -60,21 +93,32 @@ extern unsigned int __page_offset;
typedef struct page *pgtable_t;
typedef struct { unsigned long pte; } pte_t;
typedef struct { unsigned long pgprot; } pgprot_t;
/* FIXME this can depend on linux kernel version */
# ifdef CONFIG_MMU
typedef struct { unsigned long pmd; } pmd_t;
typedef struct { unsigned long pgd; } pgd_t;
# else /* CONFIG_MMU */
typedef struct { unsigned long ste[64]; } pmd_t;
typedef struct { pmd_t pue[1]; } pud_t;
typedef struct { pud_t pge[1]; } pgd_t;
# endif /* CONFIG_MMU */
# define pte_val(x) ((x).pte)
# define pgprot_val(x) ((x).pgprot)
#define pte_val(x) ((x).pte)
#define pgprot_val(x) ((x).pgprot)
#define pmd_val(x) ((x).ste[0])
#define pud_val(x) ((x).pue[0])
#define pgd_val(x) ((x).pge[0])
# ifdef CONFIG_MMU
# define pmd_val(x) ((x).pmd)
# define pgd_val(x) ((x).pgd)
# else /* CONFIG_MMU */
# define pmd_val(x) ((x).ste[0])
# define pud_val(x) ((x).pue[0])
# define pgd_val(x) ((x).pge[0])
# endif /* CONFIG_MMU */
#define __pte(x) ((pte_t) { (x) })
#define __pmd(x) ((pmd_t) { (x) })
#define __pgd(x) ((pgd_t) { (x) })
#define __pgprot(x) ((pgprot_t) { (x) })
# define __pte(x) ((pte_t) { (x) })
# define __pmd(x) ((pmd_t) { (x) })
# define __pgd(x) ((pgd_t) { (x) })
# define __pgprot(x) ((pgprot_t) { (x) })
/**
* Conversions for virtual address, physical address, pfn, and struct
......@@ -94,44 +138,80 @@ extern unsigned long max_low_pfn;
extern unsigned long min_low_pfn;
extern unsigned long max_pfn;
#define __pa(vaddr) ((unsigned long) (vaddr))
#define __va(paddr) ((void *) (paddr))
extern unsigned long memory_start;
extern unsigned long memory_end;
extern unsigned long memory_size;
#define phys_to_pfn(phys) (PFN_DOWN(phys))
#define pfn_to_phys(pfn) (PFN_PHYS(pfn))
extern int page_is_ram(unsigned long pfn);
#define virt_to_pfn(vaddr) (phys_to_pfn((__pa(vaddr))))
#define pfn_to_virt(pfn) __va(pfn_to_phys((pfn)))
# define phys_to_pfn(phys) (PFN_DOWN(phys))
# define pfn_to_phys(pfn) (PFN_PHYS(pfn))
#define virt_to_page(vaddr) (pfn_to_page(virt_to_pfn(vaddr)))
#define page_to_virt(page) (pfn_to_virt(page_to_pfn(page)))
# define virt_to_pfn(vaddr) (phys_to_pfn((__pa(vaddr))))
# define pfn_to_virt(pfn) __va(pfn_to_phys((pfn)))
#define page_to_phys(page) (pfn_to_phys(page_to_pfn(page)))
#define page_to_bus(page) (page_to_phys(page))
#define phys_to_page(paddr) (pfn_to_page(phys_to_pfn(paddr)))
# ifdef CONFIG_MMU
# define virt_to_page(kaddr) (mem_map + MAP_NR(kaddr))
# else /* CONFIG_MMU */
# define virt_to_page(vaddr) (pfn_to_page(virt_to_pfn(vaddr)))
# define page_to_virt(page) (pfn_to_virt(page_to_pfn(page)))
# define page_to_phys(page) (pfn_to_phys(page_to_pfn(page)))
# define page_to_bus(page) (page_to_phys(page))
# define phys_to_page(paddr) (pfn_to_page(phys_to_pfn(paddr)))
# endif /* CONFIG_MMU */
extern unsigned int memory_start;
extern unsigned int memory_end;
extern unsigned int memory_size;
# ifndef CONFIG_MMU
# define pfn_valid(pfn) ((pfn) >= min_low_pfn && (pfn) <= max_mapnr)
# define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT)
# else /* CONFIG_MMU */
# define ARCH_PFN_OFFSET (memory_start >> PAGE_SHIFT)
# define pfn_valid(pfn) ((pfn) < (max_mapnr + ARCH_PFN_OFFSET))
# define VALID_PAGE(page) ((page - mem_map) < max_mapnr)
# endif /* CONFIG_MMU */
#define pfn_valid(pfn) ((pfn) >= min_low_pfn && (pfn) < max_mapnr)
# endif /* __ASSEMBLY__ */
#define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT)
#define virt_addr_valid(vaddr) (pfn_valid(virt_to_pfn(vaddr)))
#else
#define tophys(rd, rs) (addik rd, rs, 0)
#define tovirt(rd, rs) (addik rd, rs, 0)
#endif /* __ASSEMBLY__ */
#define virt_addr_valid(vaddr) (pfn_valid(virt_to_pfn(vaddr)))
# ifndef CONFIG_MMU
# define __pa(vaddr) ((unsigned long) (vaddr))
# define __va(paddr) ((void *) (paddr))
# else /* CONFIG_MMU */
# define __pa(x) __virt_to_phys((unsigned long)(x))
# define __va(x) ((void *)__phys_to_virt((unsigned long)(x)))
# endif /* CONFIG_MMU */
/* Convert between virtual and physical address for MMU. */
/* Handle MicroBlaze processor with virtual memory. */
#ifndef CONFIG_MMU
#define __virt_to_phys(addr) addr
#define __phys_to_virt(addr) addr
#define tophys(rd, rs) addik rd, rs, 0
#define tovirt(rd, rs) addik rd, rs, 0
#else
#define __virt_to_phys(addr) \
((addr) + CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START)
#define __phys_to_virt(addr) \
((addr) + CONFIG_KERNEL_START - CONFIG_KERNEL_BASE_ADDR)
#define tophys(rd, rs) \
addik rd, rs, (CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START)
#define tovirt(rd, rs) \
addik rd, rs, (CONFIG_KERNEL_START - CONFIG_KERNEL_BASE_ADDR)
#endif /* CONFIG_MMU */
#define TOPHYS(addr) __virt_to_phys(addr)
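/*
 * Worked example (hypothetical configuration): with CONFIG_KERNEL_START
 * 0xc0000000 and CONFIG_KERNEL_BASE_ADDR 0x10000000,
 *
 *	__virt_to_phys(0xc0100000)
 *		= 0xc0100000 + 0x10000000 - 0xc0000000 = 0x10100000
 *
 * i.e. the kernel virtual window is a fixed linear offset from the
 * physical RAM base.
 */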
#ifdef CONFIG_MMU
#ifdef CONFIG_CONTIGUOUS_PAGE_ALLOC
#define WANT_PAGE_VIRTUAL 1 /* page alloc 2 relies on this */
#endif
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
#endif /* CONFIG_MMU */
#endif /* __KERNEL__ */
#include <asm-generic/memory_model.h>
......
/*
* Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
* Copyright (C) 2008-2009 PetaLogix
* Copyright (C) 2006 Atmark Techno, Inc.
*
* This file is subject to the terms and conditions of the GNU General Public
......@@ -9,6 +11,195 @@
#ifndef _ASM_MICROBLAZE_PGALLOC_H
#define _ASM_MICROBLAZE_PGALLOC_H
#ifdef CONFIG_MMU
#include <linux/kernel.h> /* For min/max macros */
#include <linux/highmem.h>
#include <asm/setup.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/cache.h>
#define PGDIR_ORDER 0
/*
* This is handled very differently on MicroBlaze since our page tables
* are all 0's and I want to be able to use these zero'd pages elsewhere
* as well - it gives us quite a speedup.
* -- Cort
*/
extern struct pgtable_cache_struct {
unsigned long *pgd_cache;
unsigned long *pte_cache;
unsigned long pgtable_cache_sz;
} quicklists;
#define pgd_quicklist (quicklists.pgd_cache)
#define pmd_quicklist ((unsigned long *)0)
#define pte_quicklist (quicklists.pte_cache)
#define pgtable_cache_size (quicklists.pgtable_cache_sz)
extern unsigned long *zero_cache; /* head linked list of pre-zero'd pages */
extern atomic_t zero_sz; /* # currently pre-zero'd pages */
extern atomic_t zeropage_hits; /* # zero'd pages request that we've done */
extern atomic_t zeropage_calls; /* # zero'd pages request that've been made */
extern atomic_t zerototal; /* # pages zero'd over time */
#define zero_quicklist (zero_cache)
#define zero_cache_sz (zero_sz)
#define zero_cache_calls (zeropage_calls)
#define zero_cache_hits (zeropage_hits)
#define zero_cache_total (zerototal)
/*
* return a pre-zero'd page from the list,
* return NULL if none available -- Cort
*/
extern unsigned long get_zero_page_fast(void);
extern void __bad_pte(pmd_t *pmd);
extern inline pgd_t *get_pgd_slow(void)
{
pgd_t *ret;
ret = (pgd_t *)__get_free_pages(GFP_KERNEL, PGDIR_ORDER);
if (ret != NULL)
clear_page(ret);
return ret;
}
extern inline pgd_t *get_pgd_fast(void)
{
unsigned long *ret;
ret = pgd_quicklist;
if (ret != NULL) {
pgd_quicklist = (unsigned long *)(*ret);
ret[0] = 0;
pgtable_cache_size--;
} else
ret = (unsigned long *)get_pgd_slow();
return (pgd_t *)ret;
}
extern inline void free_pgd_fast(pgd_t *pgd)
{
*(unsigned long **)pgd = pgd_quicklist;
pgd_quicklist = (unsigned long *) pgd;
pgtable_cache_size++;
}
extern inline void free_pgd_slow(pgd_t *pgd)
{
free_page((unsigned long)pgd);
}
#define pgd_free(mm, pgd) free_pgd_fast(pgd)
#define pgd_alloc(mm) get_pgd_fast()
#define pmd_pgtable(pmd) pmd_page(pmd)
/*
* We don't have any real pmd's, and this code never triggers because
* the pgd will always be present..
*/
#define pmd_alloc_one_fast(mm, address) ({ BUG(); ((pmd_t *)1); })
#define pmd_alloc_one(mm, address) ({ BUG(); ((pmd_t *)2); })
/* FIXME: two definitions - see below */
#define pmd_free(mm, x) do { } while (0)
#define pgd_populate(mm, pmd, pte) BUG()
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
unsigned long address)
{
pte_t *pte;
extern int mem_init_done;
extern void *early_get_page(void);
if (mem_init_done) {
pte = (pte_t *)__get_free_page(GFP_KERNEL |
__GFP_REPEAT | __GFP_ZERO);
} else {
pte = (pte_t *)early_get_page();
if (pte)
clear_page(pte);
}
return pte;
}
static inline struct page *pte_alloc_one(struct mm_struct *mm,
unsigned long address)
{
struct page *ptepage;
#ifdef CONFIG_HIGHPTE
int flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_REPEAT;
#else
int flags = GFP_KERNEL | __GFP_REPEAT;
#endif
ptepage = alloc_pages(flags, 0);
if (ptepage)
clear_highpage(ptepage);
return ptepage;
}
static inline pte_t *pte_alloc_one_fast(struct mm_struct *mm,
unsigned long address)
{
unsigned long *ret;
ret = pte_quicklist;
if (ret != NULL) {
pte_quicklist = (unsigned long *)(*ret);
ret[0] = 0;
pgtable_cache_size--;
}
return (pte_t *)ret;
}
extern inline void pte_free_fast(pte_t *pte)
{
*(unsigned long **)pte = pte_quicklist;
pte_quicklist = (unsigned long *) pte;
pgtable_cache_size++;
}
extern inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
free_page((unsigned long)pte);
}
extern inline void pte_free_slow(struct page *ptepage)
{
__free_page(ptepage);
}
extern inline void pte_free(struct mm_struct *mm, struct page *ptepage)
{
__free_page(ptepage);
}
#define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, (pte))
#define pmd_populate(mm, pmd, pte) (pmd_val(*(pmd)) = page_address(pte))
#define pmd_populate_kernel(mm, pmd, pte) \
(pmd_val(*(pmd)) = (unsigned long) (pte))
/*
* We don't have any real pmd's, and this code never triggers because
* the pgd will always be present..
*/
#define pmd_alloc_one(mm, address) ({ BUG(); ((pmd_t *)2); })
/*#define pmd_free(mm, x) do { } while (0)*/
#define __pmd_free_tlb(tlb, x) do { } while (0)
#define pgd_populate(mm, pmd, pte) BUG()
extern int do_check_pgt_cache(int, int);
#endif /* CONFIG_MMU */
#define check_pgt_cache() do {} while (0)
#endif /* _ASM_MICROBLAZE_PGALLOC_H */
......@@ -16,7 +16,7 @@
*/
typedef unsigned long __kernel_ino_t;
typedef unsigned int __kernel_mode_t;
typedef unsigned short __kernel_mode_t;
typedef unsigned int __kernel_nlink_t;
typedef long __kernel_off_t;
typedef int __kernel_pid_t;
......
/*
* Copyright (C) 2008 Michal Simek
* Copyright (C) 2008 PetaLogix
* Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
* Copyright (C) 2008-2009 PetaLogix
* Copyright (C) 2006 Atmark Techno, Inc.
*
* This file is subject to the terms and conditions of the GNU General Public
......@@ -26,14 +26,15 @@ extern const struct seq_operations cpuinfo_op;
# define cpu_sleep() do {} while (0)
# define prepare_to_copy(tsk) do {} while (0)
# endif /* __ASSEMBLY__ */
#define task_pt_regs(tsk) \
(((struct pt_regs *)(THREAD_SIZE + task_stack_page(tsk))) - 1)
/* Do necessary setup to start up a newly executed thread. */
void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long usp);
# endif /* __ASSEMBLY__ */
# ifndef CONFIG_MMU
/*
* User space process size: memory size
*
......@@ -85,4 +86,90 @@ extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
# define KSTK_EIP(tsk) (0)
# define KSTK_ESP(tsk) (0)
# else /* CONFIG_MMU */
/*
* This is used to define STACK_TOP, and with MMU it must be below
* kernel base to select the correct PGD when handling MMU exceptions.
*/
# define TASK_SIZE (CONFIG_KERNEL_START)
/*
* This decides where the kernel will search for a free chunk of vm
* space during mmap's.
*/
# define TASK_UNMAPPED_BASE (TASK_SIZE / 8 * 3)
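/*
 * Worked example: with the default CONFIG_KERNEL_START of 0xc0000000,
 * TASK_SIZE is 0xc0000000 and TASK_UNMAPPED_BASE is
 * 0xc0000000 / 8 * 3 = 0x48000000, leaving the region below for the
 * fixed-address program image and heap.
 */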
# define THREAD_KSP 0
# ifndef __ASSEMBLY__
/*
* Default implementation of macro that returns current
* instruction pointer ("program counter").
*/
# define current_text_addr() ({ __label__ _l; _l: &&_l; })
/* If you change this, you must change the associated assembly-languages
* constants defined below, THREAD_*.
*/
struct thread_struct {
/* kernel stack pointer (must be first field in structure) */
unsigned long ksp;
unsigned long ksp_limit; /* if ksp <= ksp_limit stack overflow */
void *pgdir; /* root of page-table tree */
struct pt_regs *regs; /* Pointer to saved register state */
};
# define INIT_THREAD { \
.ksp = sizeof init_stack + (unsigned long)init_stack, \
.pgdir = swapper_pg_dir, \
}
/* Do necessary setup to start up a newly executed thread. */
void start_thread(struct pt_regs *regs,
unsigned long pc, unsigned long usp);
/* Free all resources held by a thread. */
extern inline void release_thread(struct task_struct *dead_task)
{
}
extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
/* Free current thread data structures etc. */
static inline void exit_thread(void)
{
}
/* Return saved (kernel) PC of a blocked thread. */
# define thread_saved_pc(tsk) \
((tsk)->thread.regs ? (tsk)->thread.regs->r15 : 0)
unsigned long get_wchan(struct task_struct *p);
/* The size allocated for kernel stacks. This _must_ be a power of two! */
# define KERNEL_STACK_SIZE 0x2000
/* Return some info about the user process TASK. */
# define task_tos(task) ((unsigned long)(task) + KERNEL_STACK_SIZE)
# define task_regs(task) ((struct pt_regs *)task_tos(task) - 1)
# define task_pt_regs_plus_args(tsk) \
(((void *)task_pt_regs(tsk)) - STATE_SAVE_ARG_SPACE)
# define task_sp(task) (task_regs(task)->r1)
# define task_pc(task) (task_regs(task)->pc)
/* Grotty old names for some. */
# define KSTK_EIP(task) (task_pc(task))
# define KSTK_ESP(task) (task_sp(task))
/* FIXME */
# define deactivate_mm(tsk, mm) do { } while (0)
# define STACK_TOP TASK_SIZE
# define STACK_TOP_MAX STACK_TOP
# endif /* __ASSEMBLY__ */
# endif /* CONFIG_MMU */
#endif /* _ASM_MICROBLAZE_PROCESSOR_H */
......@@ -10,7 +10,6 @@
#define _ASM_MICROBLAZE_PTRACE_H
#ifndef __ASSEMBLY__
#include <linux/types.h>
typedef unsigned long microblaze_reg_t;
......
/*
* Copyright (C) 2008 Michal Simek
* Copyright (C) 2008 PetaLogix
* Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
* Copyright (C) 2008-2009 PetaLogix
* Copyright (C) 2006 Atmark Techno, Inc.
*
* This file is subject to the terms and conditions of the GNU General Public
......@@ -30,4 +30,21 @@
#define FSR_UF (1<<1) /* Underflow */
#define FSR_DO (1<<0) /* Denormalized operand error */
# ifdef CONFIG_MMU
/* Machine State Register (MSR) Fields */
# define MSR_UM (1<<11) /* User Mode */
# define MSR_UMS (1<<12) /* User Mode Save */
# define MSR_VM (1<<13) /* Virtual Mode */
# define MSR_VMS (1<<14) /* Virtual Mode Save */
# define MSR_KERNEL (MSR_EE | MSR_VM)
/* # define MSR_USER (MSR_KERNEL | MSR_UM | MSR_IE) */
# define MSR_KERNEL_VMS (MSR_EE | MSR_VMS)
/* # define MSR_USER_VMS (MSR_KERNEL_VMS | MSR_UMS | MSR_IE) */
/* Exception State Register (ESR) Fields */
# define ESR_DIZ (1<<11) /* Zone Protection */
# define ESR_S (1<<10) /* Store instruction */
# endif /* CONFIG_MMU */
#endif /* _ASM_MICROBLAZE_REGISTERS_H */
/*
* Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
* Copyright (C) 2008-2009 PetaLogix
* Copyright (C) 2006 Atmark Techno, Inc.
*
* This file is subject to the terms and conditions of the GNU General Public
......@@ -14,6 +16,7 @@
# ifndef __ASSEMBLY__
extern char _ssbss[], _esbss[];
extern unsigned long __ivt_start[], __ivt_end[];
extern char _etext[], _stext[];
# ifdef CONFIG_MTD_UCLINUX
extern char *_ebss;
......
/*
* Copyright (C) 2008 Michal Simek
* Copyright (C) 2008 PetaLogix
* Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
* Copyright (C) 2008-2009 PetaLogix
* Copyright (C) 2006 Atmark Techno, Inc.
*
* This file is subject to the terms and conditions of the GNU General Public
......@@ -11,7 +11,7 @@
#ifndef _ASM_MICROBLAZE_SEGMENT_H
#define _ASM_MICROBLAZE_SEGMENT_H
#ifndef __ASSEMBLY__
# ifndef __ASSEMBLY__
typedef struct {
unsigned long seg;
......@@ -29,13 +29,19 @@ typedef struct {
*
* For a non-MMU arch like MicroBlaze, KERNEL_DS and USER_DS are equal.
*/
# define KERNEL_DS ((mm_segment_t){0})
# define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
# ifndef CONFIG_MMU
# define KERNEL_DS MAKE_MM_SEG(0)
# define USER_DS KERNEL_DS
# else
# define KERNEL_DS MAKE_MM_SEG(0xFFFFFFFF)
# define USER_DS MAKE_MM_SEG(TASK_SIZE - 1)
# endif
# define get_ds() (KERNEL_DS)
# define get_fs() (current_thread_info()->addr_limit)
# define set_fs(x) \
do { current_thread_info()->addr_limit = (x); } while (0)
# define set_fs(val) (current_thread_info()->addr_limit = (val))
# define segment_eq(a, b) ((a).seg == (b).seg)
......
/*
* Copyright (C) 2007-2008 Michal Simek <monstr@monstr.eu>
* Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
* Copyright (C) 2007-2009 PetaLogix
* Copyright (C) 2006 Atmark Techno, Inc.
*
* This file is subject to the terms and conditions of the GNU General Public
......@@ -18,7 +19,6 @@
extern unsigned int boot_cpuid; /* move to smp.h */
extern char cmd_line[COMMAND_LINE_SIZE];
# endif/* __KERNEL__ */
void early_printk(const char *fmt, ...);
......@@ -30,6 +30,11 @@ void setup_heartbeat(void);
unsigned long long sched_clock(void);
# ifdef CONFIG_MMU
extern void mmu_reset(void);
extern void early_console_reg_tlb_alloc(unsigned int addr);
# endif /* CONFIG_MMU */
void time_init(void);
void init_IRQ(void);
void machine_early_init(const char *cmdline, unsigned int ram,
......@@ -40,5 +45,6 @@ void machine_shutdown(void);
void machine_halt(void);
void machine_power_off(void);
# endif/* __KERNEL__ */
# endif /* __ASSEMBLY__ */
#endif /* _ASM_MICROBLAZE_SETUP_H */
......@@ -16,58 +16,53 @@
#include <linux/posix_types.h>
#define STAT_HAVE_NSEC 1
struct stat {
unsigned int st_dev;
unsigned long st_dev;
unsigned long st_ino;
unsigned int st_mode;
unsigned int st_nlink;
unsigned int st_uid;
unsigned int st_gid;
unsigned int st_rdev;
unsigned long st_size;
unsigned long st_blksize;
unsigned long st_blocks;
unsigned long st_atime;
unsigned long __unused1; /* unsigned long st_atime_nsec */
unsigned long st_mtime;
unsigned long __unused2; /* unsigned long st_mtime_nsec */
unsigned long st_ctime;
unsigned long __unused3; /* unsigned long st_ctime_nsec */
unsigned long st_rdev;
unsigned long __pad1;
long st_size;
int st_blksize;
int __pad2;
long st_blocks;
int st_atime;
unsigned int st_atime_nsec;
int st_mtime;
unsigned int st_mtime_nsec;
int st_ctime;
unsigned int st_ctime_nsec;
unsigned long __unused4;
unsigned long __unused5;
};
struct stat64 {
unsigned long long st_dev;
unsigned long __unused1;
unsigned long long st_ino;
unsigned int st_mode;
unsigned int st_nlink;
unsigned int st_uid;
unsigned int st_gid;
unsigned long long st_rdev;
unsigned long __unused3;
long long st_size;
unsigned long st_blksize;
unsigned long st_blocks; /* No. of 512-byte blocks allocated */
unsigned long __unused4; /* future possible st_blocks high bits */
unsigned long st_atime;
unsigned long st_atime_nsec;
unsigned long st_mtime;
unsigned long st_mtime_nsec;
unsigned long st_ctime;
unsigned long st_ctime_nsec;
unsigned long __unused8;
unsigned long long st_dev; /* Device. */
unsigned long long st_ino; /* File serial number. */
unsigned int st_mode; /* File mode. */
unsigned int st_nlink; /* Link count. */
unsigned int st_uid; /* User ID of the file's owner. */
unsigned int st_gid; /* Group ID of the file's group. */
unsigned long long st_rdev; /* Device number, if device. */
unsigned long long __pad1;
long long st_size; /* Size of file, in bytes. */
int st_blksize; /* Optimal block size for I/O. */
int __pad2;
long long st_blocks; /* Number of 512-byte blocks allocated. */
int st_atime; /* Time of last access. */
unsigned int st_atime_nsec;
int st_mtime; /* Time of last modification. */
unsigned int st_mtime_nsec;
int st_ctime; /* Time of last status change. */
unsigned int st_ctime_nsec;
unsigned int __unused4;
unsigned int __unused5;
};
#endif /* _ASM_MICROBLAZE_STAT_H */
......@@ -9,7 +9,7 @@
#ifndef _ASM_MICROBLAZE_STRING_H
#define _ASM_MICROBLAZE_STRING_H
#ifndef __KERNEL__
#ifdef __KERNEL__
#define __HAVE_ARCH_MEMSET
#define __HAVE_ARCH_MEMCPY
......
......@@ -34,6 +34,9 @@ asmlinkage int sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize,
asmlinkage int sys_sigaction(int sig, const struct old_sigaction *act,
struct old_sigaction *oact);
asmlinkage long sys_rt_sigaction(int sig, const struct sigaction __user *act,
struct sigaction __user *oact, size_t sigsetsize);
asmlinkage int sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
struct pt_regs *regs);
......
......@@ -122,6 +122,8 @@ static inline struct thread_info *current_thread_info(void)
#define TIF_SINGLESTEP 4
#define TIF_IRET 5 /* return with iret */
#define TIF_MEMDIE 6
#define TIF_SYSCALL_AUDIT 9 /* syscall auditing active */
#define TIF_SECCOMP 10 /* secure computing */
#define TIF_FREEZE 14 /* Freezing for suspend */
/* FIXME change in entry.S */
......@@ -138,10 +140,17 @@ static inline struct thread_info *current_thread_info(void)
#define _TIF_IRET (1<<TIF_IRET)
#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
#define _TIF_FREEZE (1<<TIF_FREEZE)
#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
#define _TIF_SECCOMP (1 << TIF_SECCOMP)
#define _TIF_KERNEL_TRACE (1 << TIF_KERNEL_TRACE)
/* work to do in syscall trace */
#define _TIF_WORK_SYSCALL_MASK (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP | \
_TIF_SYSCALL_AUDIT | _TIF_SECCOMP)
/* work to do on interrupt/exception return */
#define _TIF_WORK_MASK 0x0000FFFE
/* work to do on any return to u-space */
#define _TIF_ALLWORK_MASK 0x0000FFFF
......@@ -154,6 +163,17 @@ static inline struct thread_info *current_thread_info(void)
*/
/* FPU was used by this task this quantum (SMP) */
#define TS_USEDFPU 0x0001
#define TS_RESTORE_SIGMASK 0x0002
#ifndef __ASSEMBLY__
#define HAVE_SET_RESTORE_SIGMASK 1
static inline void set_restore_sigmask(void)
{
struct thread_info *ti = current_thread_info();
ti->status |= TS_RESTORE_SIGMASK;
set_bit(TIF_SIGPENDING, (unsigned long *)&ti->flags);
}
#endif
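/*
 * Usage sketch (hypothetical, mirroring how other architectures use
 * this hook): a sigsuspend-style syscall installs a temporary signal
 * mask, calls set_restore_sigmask(), and returns; the signal-delivery
 * path then notices TS_RESTORE_SIGMASK and restores the saved mask
 * once the handler frame is set up, instead of the syscall doing it.
 */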
#endif /* __KERNEL__ */
#endif /* _ASM_MICROBLAZE_THREAD_INFO_H */
/*
* Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
* Copyright (C) 2008-2009 PetaLogix
* Copyright (C) 2006 Atmark Techno, Inc.
*
* This file is subject to the terms and conditions of the GNU General Public
......@@ -13,4 +15,10 @@
#include <asm-generic/tlb.h>
#ifdef CONFIG_MMU
#define tlb_start_vma(tlb, vma) do { } while (0)
#define tlb_end_vma(tlb, vma) do { } while (0)
#define __tlb_remove_tlb_entry(tlb, pte, address) do { } while (0)
#endif
#endif /* _ASM_MICROBLAZE_TLB_H */
/*
* Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
* Copyright (C) 2008-2009 PetaLogix
* Copyright (C) 2006 Atmark Techno, Inc.
*
* This file is subject to the terms and conditions of the GNU General Public
......@@ -9,6 +11,50 @@
#ifndef _ASM_MICROBLAZE_TLBFLUSH_H
#define _ASM_MICROBLAZE_TLBFLUSH_H
#ifdef CONFIG_MMU
#include <linux/sched.h>
#include <linux/threads.h>
#include <asm/processor.h> /* For TASK_SIZE */
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
extern void _tlbie(unsigned long address);
extern void _tlbia(void);
#define __tlbia() _tlbia()
static inline void local_flush_tlb_all(void)
{ __tlbia(); }
static inline void local_flush_tlb_mm(struct mm_struct *mm)
{ __tlbia(); }
static inline void local_flush_tlb_page(struct vm_area_struct *vma,
unsigned long vmaddr)
{ _tlbie(vmaddr); }
static inline void local_flush_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{ __tlbia(); }
#define flush_tlb_kernel_range(start, end) do { } while (0)
#define update_mmu_cache(vma, addr, pte) do { } while (0)
#define flush_tlb_all local_flush_tlb_all
#define flush_tlb_mm local_flush_tlb_mm
#define flush_tlb_page local_flush_tlb_page
#define flush_tlb_range local_flush_tlb_range
/*
* This is called in munmap when we have freed up some page-table
* pages. We don't need to do anything here, there's nothing special
* about our page-table pages. -- paulus
*/
static inline void flush_tlb_pgtables(struct mm_struct *mm,
unsigned long start, unsigned long end) { }
#else /* CONFIG_MMU */
#define flush_tlb() BUG()
#define flush_tlb_all() BUG()
#define flush_tlb_mm(mm) BUG()
......@@ -17,4 +63,6 @@
#define flush_tlb_pgtables(mm, start, end) BUG()
#define flush_tlb_kernel_range(start, end) BUG()
#endif /* CONFIG_MMU */
#endif /* _ASM_MICROBLAZE_TLBFLUSH_H */
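Note that local_flush_tlb_mm() and local_flush_tlb_range() simply drop the whole TLB via _tlbia(). A possible refinement, purely a sketch and not part of this patch, would invalidate page-by-page with _tlbie() when the range is small:
	/* Hypothetical refinement: per-page invalidation for short ranges. */
	static inline void local_flush_tlb_range_sketch(struct vm_area_struct *vma,
			unsigned long start, unsigned long end)
	{
		if (end - start <= 4 * PAGE_SIZE) {
			unsigned long addr;

			for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE)
				_tlbie(addr);
		} else {
			_tlbia();	/* large range: cheaper to flush everything */
		}
	}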
/*
* Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
* Copyright (C) 2008-2009 PetaLogix
* Copyright (C) 2006 Atmark Techno, Inc.
*
* This file is subject to the terms and conditions of the GNU General Public
......@@ -26,6 +28,10 @@
#define VERIFY_READ 0
#define VERIFY_WRITE 1
#define __clear_user(addr, n) (memset((void *)(addr), 0, (n)), 0)
#ifndef CONFIG_MMU
extern int ___range_ok(unsigned long addr, unsigned long size);
#define __range_ok(addr, size) \
......@@ -34,13 +40,12 @@ extern int ___range_ok(unsigned long addr, unsigned long size);
#define access_ok(type, addr, size) (__range_ok((addr), (size)) == 0)
#define __access_ok(addr, size) (__range_ok((addr), (size)) == 0)
extern inline int bad_user_access_length(void)
{
return 0;
}
/* Undefined function to trigger linker error */
extern int bad_user_access_length(void);
/* FIXME this is a function for optimization -> memcpy */
#define __get_user(var, ptr) \
({ \
({ \
int __gu_err = 0; \
switch (sizeof(*(ptr))) { \
case 1: \
......@@ -57,12 +62,13 @@ extern inline int bad_user_access_length(void)
break; \
} \
__gu_err; \
})
})
#define __get_user_bad() (bad_user_access_length(), (-EFAULT))
/* FIXME is not there defined __pu_val */
#define __put_user(var, ptr) \
({ \
({ \
int __pu_err = 0; \
switch (sizeof(*(ptr))) { \
case 1: \
......@@ -71,8 +77,8 @@ extern inline int bad_user_access_length(void)
*(ptr) = (var); \
break; \
case 8: { \
typeof(*(ptr)) __pu_val = var; \
memcpy(ptr, &__pu_val, sizeof(__pu_val));\
typeof(*(ptr)) __pu_val = (var); \
memcpy(ptr, &__pu_val, sizeof(__pu_val)); \
} \
break; \
default: \
......@@ -80,22 +86,22 @@ extern inline int bad_user_access_length(void)
break; \
} \
__pu_err; \
})
})
#define __put_user_bad() (bad_user_access_length(), (-EFAULT))
#define put_user(x, ptr) __put_user(x, ptr)
#define get_user(x, ptr) __get_user(x, ptr)
#define copy_to_user(to, from, n) (memcpy(to, from, n), 0)
#define copy_from_user(to, from, n) (memcpy(to, from, n), 0)
#define put_user(x, ptr) __put_user((x), (ptr))
#define get_user(x, ptr) __get_user((x), (ptr))
#define __copy_to_user(to, from, n) (copy_to_user(to, from, n))
#define __copy_from_user(to, from, n) (copy_from_user(to, from, n))
#define __copy_to_user_inatomic(to, from, n) (__copy_to_user(to, from, n))
#define __copy_from_user_inatomic(to, from, n) (__copy_from_user(to, from, n))
#define copy_to_user(to, from, n) (memcpy((to), (from), (n)), 0)
#define copy_from_user(to, from, n) (memcpy((to), (from), (n)), 0)
#define __clear_user(addr, n) (memset((void *)addr, 0, n), 0)
#define __copy_to_user(to, from, n) (copy_to_user((to), (from), (n)))
#define __copy_from_user(to, from, n) (copy_from_user((to), (from), (n)))
#define __copy_to_user_inatomic(to, from, n) \
(__copy_to_user((to), (from), (n)))
#define __copy_from_user_inatomic(to, from, n) \
(__copy_from_user((to), (from), (n)))
static inline unsigned long clear_user(void *addr, unsigned long size)
{
......@@ -107,10 +113,197 @@ static inline unsigned long clear_user(void *addr, unsigned long size)
/* Returns 0 if exception not found and fixup otherwise. */
extern unsigned long search_exception_table(unsigned long);
extern long strncpy_from_user(char *dst, const char *src, long count);
extern long strnlen_user(const char *src, long count);
#else /* CONFIG_MMU */
/*
* Address is valid if:
* - "addr", "addr + size" and "size" are all below the limit
*/
#define access_ok(type, addr, size) \
(get_fs().seg > (((unsigned long)(addr)) | \
(size) | ((unsigned long)(addr) + (size))))
/* || printk("access_ok failed for %s at 0x%08lx (size %d), seg 0x%08x\n",
type?"WRITE":"READ",addr,size,get_fs().seg)) */
/*
* All the __XXX versions macros/functions below do not perform
* access checking. It is assumed that the necessary checks have been
* already performed before the function (macro) is called.
*/
#define get_user(x, ptr) \
({ \
access_ok(VERIFY_READ, (ptr), sizeof(*(ptr))) \
? __get_user((x), (ptr)) : -EFAULT; \
})
#define put_user(x, ptr) \
({ \
access_ok(VERIFY_WRITE, (ptr), sizeof(*(ptr))) \
? __put_user((x), (ptr)) : -EFAULT; \
})
#define __get_user(x, ptr) \
({ \
unsigned long __gu_val; \
/*unsigned long __gu_ptr = (unsigned long)(ptr);*/ \
long __gu_err; \
switch (sizeof(*(ptr))) { \
case 1: \
__get_user_asm("lbu", (ptr), __gu_val, __gu_err); \
break; \
case 2: \
__get_user_asm("lhu", (ptr), __gu_val, __gu_err); \
break; \
case 4: \
__get_user_asm("lw", (ptr), __gu_val, __gu_err); \
break; \
default: \
__gu_val = 0; __gu_err = -EINVAL; \
} \
x = (__typeof__(*(ptr))) __gu_val; \
__gu_err; \
})
#define __get_user_asm(insn, __gu_ptr, __gu_val, __gu_err) \
({ \
__asm__ __volatile__ ( \
"1:" insn " %1, %2, r0; \
addk %0, r0, r0; \
2: \
.section .fixup,\"ax\"; \
3: brid 2b; \
addik %0, r0, %3; \
.previous; \
.section __ex_table,\"a\"; \
.word 1b,3b; \
.previous;" \
: "=r"(__gu_err), "=r"(__gu_val) \
: "r"(__gu_ptr), "i"(-EFAULT) \
); \
})
#define __put_user(x, ptr) \
({ \
__typeof__(*(ptr)) __gu_val = x; \
long __gu_err = 0; \
switch (sizeof(__gu_val)) { \
case 1: \
__put_user_asm("sb", (ptr), __gu_val, __gu_err); \
break; \
case 2: \
__put_user_asm("sh", (ptr), __gu_val, __gu_err); \
break; \
case 4: \
__put_user_asm("sw", (ptr), __gu_val, __gu_err); \
break; \
case 8: \
__put_user_asm_8((ptr), __gu_val, __gu_err); \
break; \
default: \
__gu_err = -EINVAL; \
} \
__gu_err; \
})
#define __put_user_asm_8(__gu_ptr, __gu_val, __gu_err) \
({ \
__asm__ __volatile__ (" lwi %0, %1, 0; \
1: swi %0, %2, 0; \
lwi %0, %1, 4; \
2: swi %0, %2, 4; \
addk %0,r0,r0; \
3: \
.section .fixup,\"ax\"; \
4: brid 3b; \
addik %0, r0, %3; \
.previous; \
.section __ex_table,\"a\"; \
.word 1b,4b,2b,4b; \
.previous;" \
: "=&r"(__gu_err) \
: "r"(&__gu_val), \
"r"(__gu_ptr), "i"(-EFAULT) \
); \
})
#define __put_user_asm(insn, __gu_ptr, __gu_val, __gu_err) \
({ \
__asm__ __volatile__ ( \
"1:" insn " %1, %2, r0; \
addk %0, r0, r0; \
2: \
.section .fixup,\"ax\"; \
3: brid 2b; \
addik %0, r0, %3; \
.previous; \
.section __ex_table,\"a\"; \
.word 1b,3b; \
.previous;" \
: "=r"(__gu_err) \
: "r"(__gu_val), "r"(__gu_ptr), "i"(-EFAULT) \
); \
})
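Each "1:" load/store above gets a row in __ex_table pointing at the "3:"/"4:" fixup stub, which branches back with -EFAULT in the error register. A hedged sketch of how a fault handler consumes that table, using the search_exception_table() helper declared earlier in this header (the function name fixup_fault_sketch and its shape are assumed, not this patch's code):
	/* Illustrative fault-handler fragment. */
	static int fixup_fault_sketch(struct pt_regs *regs)
	{
		unsigned long fixup = search_exception_table(regs->pc);

		if (fixup) {
			regs->pc = fixup;	/* resume at the fixup stub */
			return 1;		/* fault handled */
		}
		return 0;			/* genuine kernel fault */
	}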
/*
* Return: number of bytes not copied, i.e. 0 on success or non-zero on failure.
*/
static inline int clear_user(char *to, int size)
{
if (size && access_ok(VERIFY_WRITE, to, size)) {
__asm__ __volatile__ (" \
1: \
sb r0, %2, r0; \
addik %0, %0, -1; \
bneid %0, 1b; \
addik %2, %2, 1; \
2: \
.section __ex_table,\"a\"; \
.word 1b,2b; \
.section .text;" \
: "=r"(size) \
: "0"(size), "r"(to)
);
}
return size;
}
extern unsigned long __copy_tofrom_user(void __user *to,
const void __user *from, unsigned long size);
#define copy_to_user(to, from, n) \
(access_ok(VERIFY_WRITE, (to), (n)) ? \
__copy_tofrom_user((void __user *)(to), \
(__force const void __user *)(from), (n)) \
: -EFAULT)
#define __copy_to_user(to, from, n) copy_to_user((to), (from), (n))
#define __copy_to_user_inatomic(to, from, n) copy_to_user((to), (from), (n))
#define copy_from_user(to, from, n) \
(access_ok(VERIFY_READ, (from), (n)) ? \
__copy_tofrom_user((__force void __user *)(to), \
(void __user *)(from), (n)) \
: -EFAULT)
#define __copy_from_user(to, from, n) copy_from_user((to), (from), (n))
#define __copy_from_user_inatomic(to, from, n) \
copy_from_user((to), (from), (n))
extern int __strncpy_user(char *to, const char __user *from, int len);
extern int __strnlen_user(const char __user *sstr, int len);
#define strncpy_from_user(to, from, len) \
(access_ok(VERIFY_READ, from, 1) ? \
__strncpy_user(to, from, len) : -EFAULT)
#define strnlen_user(str, len) \
(access_ok(VERIFY_READ, str, 1) ? __strnlen_user(str, len) : 0)
extern long strncpy_from_user(char *dst, const char __user *src, long count);
extern long strnlen_user(const char __user *src, long count);
extern long __strncpy_from_user(char *dst, const char __user *src, long count);
#endif /* CONFIG_MMU */
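Taken together, get_user()/put_user() check the pointer with access_ok() and then go through the exception-table-protected loads and stores above. A minimal usage sketch (the function and its argument names are invented for illustration):
	#include <linux/uaccess.h>

	/* Hypothetical driver snippet showing the checked accessors in use. */
	static long double_it(int __user *uptr)
	{
		int val;

		if (get_user(val, uptr))	/* non-zero on a bad pointer */
			return -EFAULT;

		val *= 2;

		return put_user(val, uptr);	/* 0 on success, -EFAULT on fault */
	}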
/*
* The exception table consists of pairs of addresses: the first is the
......
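The truncated comment describes the classic layout: each table entry pairs the address of an instruction that may fault with the address of its fixup code, exactly what the ".word 1b,3b" directives above emit. In C terms (the standard kernel shape, stated here as background rather than quoted from this patch):
	/* One row of __ex_table, as emitted by ".word 1b,3b" above. */
	struct exception_table_entry {
		unsigned long insn;	/* address of the faulting instruction */
		unsigned long fixup;	/* address to jump to when it faults  */
	};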
......@@ -12,7 +12,8 @@
# ifdef __KERNEL__
# include <linux/unaligned/access_ok.h>
# include <linux/unaligned/be_struct.h>
# include <linux/unaligned/le_byteshift.h>
# include <linux/unaligned/generic.h>
# define get_unaligned __get_unaligned_be
......
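Switching to the big-endian struct accessors plus <linux/unaligned/generic.h> gives MicroBlaze the standard unaligned-access API; a bare get_unaligned defaults to a big-endian read. A small usage sketch (the function and buffer layout are invented for illustration):
	#include <linux/types.h>
	#include <asm/unaligned.h>

	/* Reading a 32-bit big-endian field from a packed, possibly
	 * misaligned buffer. */
	static u32 read_be32_field(const u8 *buf)
	{
		return get_unaligned_be32(buf + 1);	/* offset 1: misaligned */
	}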
......@@ -15,5 +15,6 @@ obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
obj-$(CONFIG_SELFMOD) += selfmod.o
obj-$(CONFIG_HEART_BEAT) += heartbeat.o
obj-$(CONFIG_MODULES) += microblaze_ksyms.o module.o
obj-$(CONFIG_MMU) += misc.o
obj-y += entry$(MMUEXT).o
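obj-y += entry$(MMUEXT).o picks the right low-level entry code at build time. A hedged sketch of how MMUEXT would be set in the arch Makefile (the exact fragment is assumed, not shown in this diff):
	# Assumed arch/microblaze/Makefile fragment: an empty suffix selects
	# the MMU entry.S, "-nommu" selects entry-nommu.S.
	ifeq ($(CONFIG_MMU),y)
	MMUEXT :=
	else
	MMUEXT := -nommu
	endif
	export MMUEXT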
/*
* Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
* Copyright (C) 2007-2009 PetaLogix
* Copyright (C) 2006 Atmark Techno, Inc.
*
......@@ -68,16 +69,26 @@ int main(int argc, char *argv[])
/* struct task_struct */
DEFINE(TS_THREAD_INFO, offsetof(struct task_struct, stack));
#ifdef CONFIG_MMU
DEFINE(TASK_STATE, offsetof(struct task_struct, state));
DEFINE(TASK_FLAGS, offsetof(struct task_struct, flags));
DEFINE(TASK_PTRACE, offsetof(struct task_struct, ptrace));
DEFINE(TASK_BLOCKED, offsetof(struct task_struct, blocked));
DEFINE(TASK_MM, offsetof(struct task_struct, mm));
DEFINE(TASK_ACTIVE_MM, offsetof(struct task_struct, active_mm));
DEFINE(TASK_PID, offsetof(struct task_struct, pid));
DEFINE(TASK_THREAD, offsetof(struct task_struct, thread));
DEFINE(THREAD_KSP, offsetof(struct thread_struct, ksp));
BLANK();
DEFINE(PGDIR, offsetof(struct thread_struct, pgdir));
BLANK();
#endif
/* struct thread_info */
DEFINE(TI_TASK, offsetof(struct thread_info, task));
DEFINE(TI_EXEC_DOMAIN, offsetof(struct thread_info, exec_domain));
DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
DEFINE(TI_STATUS, offsetof(struct thread_info, status));
DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
DEFINE(TI_PRE_COUNT, offsetof(struct thread_info, preempt_count));
DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit));
DEFINE(TI_RESTART_BLOCK, offsetof(struct thread_info, restart_block));
DEFINE(TI_CPU_CONTEXT, offsetof(struct thread_info, cpu_context));
BLANK();
......
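These DEFINE() lines are the usual kbuild asm-offsets trick: the compiler evaluates each offsetof() into an immediate, and a script pass over the generated assembly turns the marker lines into a header of #defines that entry.S can include. The common definition looks like this (quoted from memory of <linux/kbuild.h>, so treat as a sketch; microblaze may define it locally in this file):
	/* Emit "->SYMBOL value comment" marker lines into the .s output; a
	 * later pass rewrites them as "#define SYMBOL value" in asm-offsets.h. */
	#define DEFINE(sym, val) \
		asm volatile("\n->" #sym " %0 " #val : : "i" (val))

	#define BLANK() asm volatile("\n->" : : )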
......@@ -87,6 +87,9 @@ int __init setup_early_printk(char *opt)
base_addr = early_uartlite_console();
if (base_addr) {
early_console_initialized = 1;
#ifdef CONFIG_MMU
early_console_reg_tlb_alloc(base_addr);
#endif
early_printk("early_printk_console is enabled at 0x%08x\n",
base_addr);
......
......@@ -10,7 +10,7 @@
#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/errno.h>
#include <linux/errno.h>
#include <asm/entry.h>
#include <asm/asm-offsets.h>
#include <asm/registers.h>
......
......@@ -21,9 +21,9 @@
#include <asm/exceptions.h>
#include <asm/entry.h> /* For KM CPU var */
#include <asm/uaccess.h>
#include <asm/errno.h>
#include <asm/ptrace.h>
#include <linux/uaccess.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <asm/current.h>
#define MICROBLAZE_ILL_OPCODE_EXCEPTION 0x02
......@@ -31,7 +31,7 @@
#define MICROBLAZE_DBUS_EXCEPTION 0x04
#define MICROBLAZE_DIV_ZERO_EXCEPTION 0x05
#define MICROBLAZE_FPU_EXCEPTION 0x06
#define MICROBLAZE_PRIVILEG_EXCEPTION 0x07
#define MICROBLAZE_PRIVILEGED_EXCEPTION 0x07
static DEFINE_SPINLOCK(die_lock);
......@@ -66,6 +66,11 @@ void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
asmlinkage void full_exception(struct pt_regs *regs, unsigned int type,
int fsr, int addr)
{
#ifdef CONFIG_MMU
int code;
addr = regs->pc;
#endif
#if 0
printk(KERN_WARNING "Exception %02x in %s mode, FSR=%08x PC=%08x ESR=%08x\n",
type, user_mode(regs) ? "user" : "kernel", fsr,
......@@ -74,7 +79,13 @@ asmlinkage void full_exception(struct pt_regs *regs, unsigned int type,
switch (type & 0x1F) {
case MICROBLAZE_ILL_OPCODE_EXCEPTION:
if (user_mode(regs)) {
printk(KERN_WARNING "Illegal opcode exception in user mode.\n");
_exception(SIGILL, regs, ILL_ILLOPC, addr);
return;
}
printk(KERN_WARNING "Illegal opcode exception in kernel mode.\n");
die("opcode exception", regs, SIGBUS);
break;
case MICROBLAZE_IBUS_EXCEPTION:
if (user_mode(regs)) {
......@@ -95,11 +106,16 @@ asmlinkage void full_exception(struct pt_regs *regs, unsigned int type,
die("bus exception", regs, SIGBUS);
break;
case MICROBLAZE_DIV_ZERO_EXCEPTION:
printk(KERN_WARNING "Divide by zero exception\n");
if (user_mode(regs)) {
printk(KERN_WARNING "Divide by zero exception in user mode\n");
_exception(SIGILL, regs, ILL_ILLOPC, addr);
return;
}
printk(KERN_WARNING "Divide by zero exception in kernel mode.\n");
die("Divide by exception", regs, SIGBUS);
break;
case MICROBLAZE_FPU_EXCEPTION:
printk(KERN_WARNING "FPU exception\n");
/* IEEE FP exception */
/* I removed fsr variable and use code var for storing fsr */
if (fsr & FSR_IO)
......@@ -115,7 +131,20 @@ asmlinkage void full_exception(struct pt_regs *regs, unsigned int type,
_exception(SIGFPE, regs, fsr, addr);
break;
#ifdef CONFIG_MMU
case MICROBLAZE_PRIVILEGED_EXCEPTION:
printk(KERN_WARNING "Privileged exception\n");
/* "brk r0,r0" - used as debug breakpoint */
if (get_user(code, (unsigned long *)regs->pc) == 0
&& code == 0x980c0000) {
_exception(SIGTRAP, regs, TRAP_BRKPT, addr);
} else {
_exception(SIGILL, regs, ILL_PRVOPC, addr);
}
break;
#endif
default:
/* FIXME what to do in unexpected exception */
printk(KERN_WARNING "Unexpected exception %02x "
"PC=%08x in %s mode\n", type, (unsigned int) addr,
kernel_mode(regs) ? "kernel" : "user");
......
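Every branch above funnels user-mode faults into _exception(), which packages the trap code and address into a siginfo and forces the signal on the current task. A hedged sketch of that helper's typical shape (the real body is elsewhere in this file and may differ):
	/* Illustrative _exception() body; force_sig_info() does the delivery. */
	void _exception_sketch(int signr, struct pt_regs *regs, int code,
			       unsigned long addr)
	{
		siginfo_t info;

		info.si_signo = signr;
		info.si_errno = 0;
		info.si_code = code;
		info.si_addr = (void __user *)addr;
		force_sig_info(signr, &info, current);
	}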
......@@ -45,3 +45,5 @@ extern void __udivsi3(void);
EXPORT_SYMBOL(__udivsi3);
extern void __umodsi3(void);
EXPORT_SYMBOL(__umodsi3);
extern char *_ebss;
EXPORT_SYMBOL_GPL(_ebss);
/*
* Miscellaneous low-level MMU functions.
*
* Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
* Copyright (C) 2008-2009 PetaLogix
* Copyright (C) 2007 Xilinx, Inc. All rights reserved.
*
* Derived from arch/ppc/kernel/misc.S
*
* This file is subject to the terms and conditions of the GNU General
* Public License. See the file COPYING in the main directory of this
* archive for more details.
*/
#include <linux/linkage.h>
#include <linux/sys.h>
#include <asm/unistd.h>
#include <linux/errno.h>
#include <asm/mmu.h>
#include <asm/page.h>
.text
/*
* Flush MMU TLB
*
* We avoid flushing the pinned entries 0, 1 and possibly 2.
*/
.globl _tlbia;
.align 4;
_tlbia:
addik r12, r0, 63 /* flush all entries (63 - 3) */
/* isync */
_tlbia_1:
mts rtlbx, r12
nop
mts rtlbhi, r0 /* flush: ensure V is clear */
nop
addik r11, r12, -2
bneid r11, _tlbia_1 /* loop for all entries */
addik r12, r12, -1
/* sync */
rtsd r15, 8
nop
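In C-like terms the loop above walks the TLB from the top down, invalidating entries 63 through 2 and leaving the low pinned entries alone (the write_rtlbx/write_rtlbhi accessors are invented for the sketch; the real code must use mts as above):
	/* Hypothetical C view of _tlbia. */
	static void tlbia_sketch(void)
	{
		int i;

		for (i = 63; i >= 2; i--) {
			write_rtlbx(i);		/* invented accessor: select entry  */
			write_rtlbhi(0);	/* invented accessor: clear V bit   */
		}
	}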
/*
* Flush MMU TLB for a particular address (in r5)
*/
.globl _tlbie;
.align 4;
_tlbie:
mts rtlbsx, r5 /* look up the address in TLB */
nop
mfs r12, rtlbx /* Retrieve index */
nop
blti r12, _tlbie_1 /* Check if found */
mts rtlbhi, r0 /* flush: ensure V is clear */
nop
_tlbie_1:
rtsd r15, 8
nop
/*
* Allocate TLB entry for early console
*/
.globl early_console_reg_tlb_alloc;
.align 4;
early_console_reg_tlb_alloc:
/*
* Load a TLB entry for the UART, so that microblaze_progress() can use
* the UARTs nice and early. We use a 4k real==virtual mapping.
*/
ori r4, r0, 63
mts rtlbx, r4 /* TLB slot 63 */
or r4,r5,r0
andi r4,r4,0xfffff000
ori r4,r4,(TLB_WR|TLB_I|TLB_M|TLB_G)
andi r5,r5,0xfffff000
ori r5,r5,(TLB_VALID | TLB_PAGESZ(PAGESZ_4K))
mts rtlblo,r4 /* Load the data portion of the entry */
nop
mts rtlbhi,r5 /* Load the tag portion of the entry */
nop
rtsd r15, 8
nop
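A hedged C rendering of the two TLB words composed above, using the same flags from <asm/mmu.h>: a 4 KB real==virtual mapping of the UART page, writable and cache-inhibited (the helper name is invented):
	#include <asm/mmu.h>	/* TLB_* flags, TLB_PAGESZ, PAGESZ_4K */

	static inline void uart_tlb_words_sketch(unsigned long uart_base,
						 u32 *lo, u32 *hi)
	{
		unsigned long page = uart_base & 0xfffff000;	/* 4 KB-aligned */

		*lo = page | TLB_WR | TLB_I | TLB_M | TLB_G;	/* data word */
		*hi = page | TLB_VALID | TLB_PAGESZ(PAGESZ_4K);	/* tag word  */
	}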
/*
* Copy a whole page (4096 bytes).
*/
#define COPY_16_BYTES \
lwi r7, r6, 0; \
lwi r8, r6, 4; \
lwi r9, r6, 8; \
lwi r10, r6, 12; \
swi r7, r5, 0; \
swi r8, r5, 4; \
swi r9, r5, 8; \
swi r10, r5, 12
/* FIXME DCACHE_LINE_BYTES (CONFIG_XILINX_MICROBLAZE0_DCACHE_LINE_LEN * 4)*/
#define DCACHE_LINE_BYTES (4 * 4)
.globl copy_page;
.align 4;
copy_page:
ori r11, r0, (PAGE_SIZE/DCACHE_LINE_BYTES) - 1
_copy_page_loop:
COPY_16_BYTES
#if DCACHE_LINE_BYTES >= 32
COPY_16_BYTES
#endif
addik r6, r6, DCACHE_LINE_BYTES
addik r5, r5, DCACHE_LINE_BYTES
bneid r11, _copy_page_loop
addik r11, r11, -1
rtsd r15, 8
nop
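For reference, a hedged C equivalent of the assembly copy_page above: move one 4096-byte page in DCACHE_LINE_BYTES (16-byte) chunks of four 32-bit words, 256 iterations in total:
	/* C sketch mirroring copy_page; the asm exists for tighter scheduling. */
	static void copy_page_sketch(u32 *to, const u32 *from)
	{
		int i;

		for (i = 0; i < 4096 / 16; i++) {
			to[0] = from[0];
			to[1] = from[1];
			to[2] = from[2];
			to[3] = from[3];
			to += 4;
			from += 4;
		}
	}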
......@@ -509,12 +509,13 @@ static void __init early_init_dt_check_for_initrd(unsigned long node)
prop = of_get_flat_dt_prop(node, "linux,initrd-start", &l);
if (prop) {
initrd_start = (unsigned long)__va(of_read_ulong(prop, l/4));
initrd_start = (unsigned long)
__va((u32)of_read_ulong(prop, l/4));
prop = of_get_flat_dt_prop(node, "linux,initrd-end", &l);
if (prop) {
initrd_end = (unsigned long)
__va(of_read_ulong(prop, l/4));
__va((u32)of_read_ulong(prop, l/4));
initrd_below_start_ok = 1;
} else {
initrd_start = 0;
......@@ -563,7 +564,9 @@ static int __init early_init_dt_scan_chosen(unsigned long node,
strlcpy(cmd_line, p, min((int)l, COMMAND_LINE_SIZE));
#ifdef CONFIG_CMDLINE
#ifndef CONFIG_CMDLINE_FORCE
if (p == NULL || l == 0 || (l == 1 && (*p) == 0))
#endif
strlcpy(cmd_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
#endif /* CONFIG_CMDLINE */
......
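The nested #ifdefs above reduce to two behaviours: with CONFIG_CMDLINE_FORCE the built-in command line always wins, while CONFIG_CMDLINE alone is used only when the device tree supplied none. Spelled out as a runtime conditional for clarity (a restatement with invented names, not new kernel code):
	static void pick_cmdline_sketch(char *cmd_line, size_t cmdlen,
					const char *dt_cmdline, int l, bool force)
	{
		bool dt_empty = (dt_cmdline == NULL || l == 0 ||
				 (l == 1 && dt_cmdline[0] == '\0'));

		if (force || dt_empty)
			strlcpy(cmd_line, CONFIG_CMDLINE, cmdlen);
	}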
......@@ -2,7 +2,11 @@ ENTRY(sys_call_table)
.long sys_restart_syscall /* 0 - old "setup()" system call,
* used for restarting */
.long sys_exit
.long sys_ni_syscall /* was fork */
#ifdef CONFIG_MMU
.long sys_fork_wrapper
#else
.long sys_ni_syscall
#endif
.long sys_read
.long sys_write
.long sys_open /* 5 */
......
......@@ -17,8 +17,7 @@ ENTRY(_start)
jiffies = jiffies_64 + 4;
SECTIONS {
. = CONFIG_KERNEL_BASE_ADDR;
. = CONFIG_KERNEL_START;
.text : {
_text = . ;
_stext = . ;
......@@ -132,6 +131,8 @@ SECTIONS {
__con_initcall_end = .;
}
SECURITY_INIT
__init_end_before_initramfs = .;
.init.ramfs ALIGN(4096) : {
......