Commit c87d5d59 authored by Linus Torvalds

Merge Qualcomm Hexagon architecture

This is the fifth version of the patchset (with one tiny whitespace fix)
adding support for the Qualcomm Hexagon architecture to the Linux kernel.

Between now and the next pull request, Richard Kuo should have his key
signed, etc., and should be back on kernel.org.  In the meantime, this
got merged as an emailed patch series.

* Hexagon: (36 commits)
  Add extra arch overrides to asm-generic/checksum.h
  Hexagon: Add self to MAINTAINERS
  Hexagon: Add basic stacktrace functionality for Hexagon architecture.
  Hexagon: Add configuration and makefiles for the Hexagon architecture.
  Hexagon: Comet platform support
  Hexagon: kgdb support files
  Hexagon: Add page-fault support.
  Hexagon: Add page table header files & etc.
  Hexagon: Add ioremap support
  Hexagon: Provide DMA implementation
  Hexagon: Implement basic TLB management routines for Hexagon.
  Hexagon: Implement basic cache-flush support
  Hexagon: Provide basic implementation and/or stubs for I/O routines.
  Hexagon: Add user access functions
  Hexagon: Add locking types and functions
  Hexagon: Add SMP support
  Hexagon: Provide basic debugging and system trap support.
  Hexagon: Add ptrace support
  Hexagon: Add time and timer functions
  Hexagon: Add interrupts
  ...
parents 094803e0 4e29198e
@@ -5359,6 +5359,12 @@ F: fs/qnx4/
F:	include/linux/qnx4_fs.h
F:	include/linux/qnxtypes.h
QUALCOMM HEXAGON ARCHITECTURE
M: Richard Kuo <rkuo@codeaurora.org>
L: linux-hexagon@vger.kernel.org
S: Supported
F: arch/hexagon/
RADOS BLOCK DEVICE (RBD)
M:	Yehuda Sadeh <yehuda@hq.newdream.net>
......
# Hexagon configuration
comment "Linux Kernel Configuration for Hexagon"
config HEXAGON
def_bool y
select HAVE_OPROFILE
select USE_GENERIC_SMP_HELPERS if SMP
# Other pending projects/to-do items.
# select HAVE_REGS_AND_STACK_ACCESS_API
# select HAVE_HW_BREAKPOINT if PERF_EVENTS
# select ARCH_HAS_CPU_IDLE_WAIT
# select ARCH_WANT_OPTIONAL_GPIOLIB
# select ARCH_REQUIRE_GPIOLIB
# select HAVE_CLK
# select IRQ_PER_CPU
select HAVE_IRQ_WORK
# select GENERIC_PENDING_IRQ if SMP
select GENERIC_ATOMIC64
select HAVE_PERF_EVENTS
select HAVE_GENERIC_HARDIRQS
select GENERIC_HARDIRQS_NO__DO_IRQ
select GENERIC_HARDIRQS_NO_DEPRECATED
# GENERIC_ALLOCATOR is used by dma_alloc_coherent()
select GENERIC_ALLOCATOR
select GENERIC_IRQ_SHOW
select HAVE_ARCH_KGDB
select HAVE_ARCH_TRACEHOOK
select NO_IOPORT
# mostly generic routines, with some accelerated ones
---help---
Qualcomm Hexagon is a processor architecture designed for high
performance and low power across a wide variety of applications.
config HEXAGON_ARCH_V1
bool
config HEXAGON_ARCH_V2
bool
config HEXAGON_ARCH_V3
bool
config HEXAGON_ARCH_V4
bool
config FRAME_POINTER
def_bool y
config LOCKDEP_SUPPORT
def_bool y
config PCI
def_bool n
config EARLY_PRINTK
def_bool y
config KTIME_SCALAR
def_bool y
config MMU
def_bool y
config TRACE_IRQFLAGS_SUPPORT
def_bool y
config GENERIC_CSUM
def_bool y
#
# Use the generic interrupt handling code in kernel/irq/:
#
config GENERIC_IRQ_PROBE
def_bool y
config GENERIC_IOMAP
def_bool y
#config ZONE_DMA
# bool
# default y
config HAS_DMA
bool
select HAVE_DMA_ATTRS
default y
config NEED_SG_DMA_LENGTH
def_bool y
config RWSEM_GENERIC_SPINLOCK
def_bool n
config RWSEM_XCHGADD_ALGORITHM
def_bool y
config GENERIC_FIND_NEXT_BIT
def_bool y
config GENERIC_HWEIGHT
def_bool y
config GENERIC_TIME
def_bool y
config GENERIC_CLOCKEVENTS
def_bool y
config GENERIC_CLOCKEVENTS_BROADCAST
def_bool y
config STACKTRACE_SUPPORT
def_bool y
select STACKTRACE
config GENERIC_BUG
def_bool y
depends on BUG
config BUG
def_bool y
menu "Machine selection"
choice
prompt "System type"
default HEXAGON_ARCH_V2
config HEXAGON_COMET
bool "Comet Board"
select HEXAGON_ARCH_V2
---help---
Support for the Comet platform.
endchoice
config HEXAGON_VM
def_bool y
config CMDLINE
string "Default kernel command string"
default ""
help
On some platforms, there is currently no way for the boot loader
to pass arguments to the kernel. For these, you should supply some
command-line options at build time by entering them here. At a
minimum, you should specify the memory size and the root device
(e.g., mem=64M root=/dev/nfs).
config HEXAGON_ANGEL_TRAPS
bool "Use Angel Traps"
default n
---help---
Enable angel debug traps (for printk's).
config SMP
bool "Multi-Processing support"
---help---
	  Enables SMP support in the kernel.  If unsure, say "Y".
config NR_CPUS
int "Maximum number of CPUs" if SMP
range 2 6 if SMP
default "1" if !SMP
default "6" if SMP
---help---
This allows you to specify the maximum number of CPUs which this
kernel will support. The maximum supported value is 6 and the
minimum value which makes sense is 2.
This is purely to save memory - each supported CPU adds
approximately eight kilobytes to the kernel image.
choice
prompt "Kernel page size"
default PAGE_SIZE_4KB
---help---
Changes the default page size; use with caution.
config PAGE_SIZE_4KB
bool "4KB"
config PAGE_SIZE_16KB
bool "16KB"
config PAGE_SIZE_64KB
bool "64KB"
config PAGE_SIZE_256KB
bool "256KB"
endchoice
source "mm/Kconfig"
source "kernel/Kconfig.hz"
source "kernel/time/Kconfig"
config GENERIC_GPIO
bool "Generic GPIO support"
default n
endmenu
source "init/Kconfig"
source "drivers/Kconfig"
source "fs/Kconfig"
menu "Executable File Formats"
source "fs/Kconfig.binfmt"
endmenu
source "net/Kconfig"
source "security/Kconfig"
source "crypto/Kconfig"
source "lib/Kconfig"
menu "Kernel hacking"
source "lib/Kconfig.debug"
endmenu
# Makefile for the Hexagon arch
KBUILD_DEFCONFIG = comet_defconfig
# Do not use GP-relative (small data) addressing
KBUILD_CFLAGS += -G0
LDFLAGS_vmlinux += -G0
# Do not use single-byte enums; these will overflow.
KBUILD_CFLAGS += -fno-short-enums
# Modules must use either long-calls, or use pic/plt.
# Use long-calls for now, it's easier. And faster.
# CFLAGS_MODULE += -fPIC
# LDFLAGS_MODULE += -shared
CFLAGS_MODULE += -mlong-calls
cflags-$(CONFIG_HEXAGON_ARCH_V1) += $(call cc-option,-mv1)
cflags-$(CONFIG_HEXAGON_ARCH_V2) += $(call cc-option,-mv2)
cflags-$(CONFIG_HEXAGON_ARCH_V3) += $(call cc-option,-mv3)
cflags-$(CONFIG_HEXAGON_ARCH_V4) += $(call cc-option,-mv4)
aflags-$(CONFIG_HEXAGON_ARCH_V1) += $(call cc-option,-mv1)
aflags-$(CONFIG_HEXAGON_ARCH_V2) += $(call cc-option,-mv2)
aflags-$(CONFIG_HEXAGON_ARCH_V3) += $(call cc-option,-mv3)
aflags-$(CONFIG_HEXAGON_ARCH_V4) += $(call cc-option,-mv4)
ldflags-$(CONFIG_HEXAGON_ARCH_V1) += $(call cc-option,-mv1)
ldflags-$(CONFIG_HEXAGON_ARCH_V2) += $(call cc-option,-mv2)
ldflags-$(CONFIG_HEXAGON_ARCH_V3) += $(call cc-option,-mv3)
ldflags-$(CONFIG_HEXAGON_ARCH_V4) += $(call cc-option,-mv4)
KBUILD_CFLAGS += $(cflags-y)
KBUILD_AFLAGS += $(aflags-y)
# no KBUILD_LDFLAGS?
LDFLAGS += $(ldflags-y)
# Thread-info register will be r19.  This value is not configurable;
# it is hard-coded in several files.
TIR_NAME := r19
KBUILD_CFLAGS += -ffixed-$(TIR_NAME) -DTHREADINFO_REG=$(TIR_NAME) -D__linux__
KBUILD_AFLAGS += -DTHREADINFO_REG=$(TIR_NAME)
LIBGCC := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
libs-y += $(LIBGCC)
head-y := arch/hexagon/kernel/head.o \
arch/hexagon/kernel/init_task.o
core-y += arch/hexagon/kernel/ \
arch/hexagon/mm/ \
arch/hexagon/lib/
# arch/hexagon/platform/common/
#
#core-$(CONFIG_HEXAGON_COMET) += arch/hexagon/platform/comet/
#machine-$(CONFIG_HEXAGON_COMET) := comet
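/*
 * Illustrative sketch (not part of this patch): with r19 reserved via
 * -ffixed-r19 above, C code can recover the thread_info pointer as a
 * fixed global register variable, roughly the way an arch thread_info.h
 * would.  The spelling here is an assumption, not the actual Hexagon
 * header.
 */
struct thread_info;
register struct thread_info *__current_thread_info asm("r19");
#define current_thread_info()	__current_thread_info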
CONFIG_SMP=y
CONFIG_DEFAULT_MMAP_MIN_ADDR=0
CONFIG_HZ_100=y
CONFIG_EXPERIMENTAL=y
CONFIG_CROSS_COMPILE="hexagon-"
CONFIG_LOCALVERSION="-smp"
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_BSD_PROCESS_ACCT_V3=y
CONFIG_TASKSTATS=y
CONFIG_TASK_DELAY_ACCT=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=18
CONFIG_BLK_DEV_INITRD=y
CONFIG_EMBEDDED=y
# CONFIG_VM_EVENT_COUNTERS is not set
# CONFIG_BLK_DEV_BSG is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_STANDALONE is not set
CONFIG_CONNECTOR=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_NBD=y
CONFIG_NETDEVICES=y
CONFIG_MII=y
CONFIG_PHYLIB=y
CONFIG_NET_ETHERNET=y
# CONFIG_NETDEV_1000 is not set
# CONFIG_NETDEV_10000 is not set
# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
# CONFIG_CONSOLE_TRANSLATIONS is not set
CONFIG_LEGACY_PTY_COUNT=64
# CONFIG_DEVKMEM is not set
# CONFIG_HW_RANDOM is not set
CONFIG_SPI=y
CONFIG_SPI_DEBUG=y
CONFIG_SPI_BITBANG=y
# CONFIG_HWMON is not set
# CONFIG_VGA_CONSOLE is not set
# CONFIG_HID_SUPPORT is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT2_FS_POSIX_ACL=y
CONFIG_EXT2_FS_SECURITY=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
CONFIG_EXT3_FS_POSIX_ACL=y
CONFIG_EXT3_FS_SECURITY=y
CONFIG_QUOTA=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
# CONFIG_MISC_FILESYSTEMS is not set
CONFIG_NFS_FS=y
CONFIG_NFS_V3=y
CONFIG_NFS_V3_ACL=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
CONFIG_INET=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_INET_LRO is not set
# CONFIG_INET_DIAG is not set
# CONFIG_IPV6 is not set
CONFIG_CRYPTO_MD5=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
# CONFIG_CRYPTO_HW is not set
CONFIG_CRC_CCITT=y
CONFIG_CRC16=y
CONFIG_CRC_T10DIF=y
CONFIG_LIBCRC32C=y
CONFIG_FRAME_WARN=0
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_FS=y
# CONFIG_SCHED_DEBUG is not set
CONFIG_DEBUG_INFO=y
include include/asm-generic/Kbuild.asm
header-y += registers.h
header-y += ucontext.h
header-y += user.h
generic-y += auxvec.h
generic-y += bug.h
generic-y += bugs.h
generic-y += cpumask.h
generic-y += cputime.h
generic-y += current.h
generic-y += device.h
generic-y += div64.h
generic-y += emergency-restart.h
generic-y += errno.h
generic-y += fb.h
generic-y += fcntl.h
generic-y += ftrace.h
generic-y += hardirq.h
generic-y += hw_irq.h
generic-y += ioctl.h
generic-y += ioctls.h
generic-y += iomap.h
generic-y += ipcbuf.h
generic-y += ipc.h
generic-y += irq_regs.h
generic-y += kdebug.h
generic-y += kmap_types.h
generic-y += local64.h
generic-y += local.h
generic-y += mman.h
generic-y += msgbuf.h
generic-y += pci.h
generic-y += percpu.h
generic-y += poll.h
generic-y += posix_types.h
generic-y += resource.h
generic-y += rwsem.h
generic-y += scatterlist.h
generic-y += sections.h
generic-y += segment.h
generic-y += sembuf.h
generic-y += shmbuf.h
generic-y += shmparam.h
generic-y += siginfo.h
generic-y += socket.h
generic-y += sockios.h
generic-y += statfs.h
generic-y += stat.h
generic-y += termbits.h
generic-y += termios.h
generic-y += topology.h
generic-y += types.h
generic-y += ucontext.h
generic-y += unaligned.h
generic-y += xor.h
#include <generated/asm-offsets.h>
/*
* Atomic operations for the Hexagon architecture
*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#ifndef _ASM_ATOMIC_H
#define _ASM_ATOMIC_H
#include <linux/types.h>
#define ATOMIC_INIT(i) { (i) }
#define atomic_set(v, i) ((v)->counter = (i))
/**
* atomic_read - reads a word, atomically
* @v: pointer to atomic value
*
* Assumes all word reads on our architecture are atomic.
*/
#define atomic_read(v) ((v)->counter)
/**
 * atomic_xchg - atomically exchange a value
* @v: pointer to memory to change
* @new: new value (technically passed in a register -- see xchg)
*/
#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
/**
* atomic_cmpxchg - atomic compare-and-exchange values
* @v: pointer to value to change
* @old: desired old value to match
* @new: new value to put in
*
* Parameters are then pointer, value-in-register, value-in-register,
* and the output is the old value.
*
* Apparently this is complicated for archs that don't support
* the memw_locked like we do (or it's broken or whatever).
*
* Kind of the lynchpin of the rest of the generically defined routines.
* Remember V2 had that bug with dotnew predicate set by memw_locked.
*
* "old" is "expected" old val, __oldval is actual old value
*/
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
int __oldval;
asm volatile(
"1: %0 = memw_locked(%1);\n"
" { P0 = cmp.eq(%0,%2);\n"
" if (!P0.new) jump:nt 2f; }\n"
" memw_locked(%1,P0) = %3;\n"
" if (!P0) jump 1b;\n"
"2:\n"
: "=&r" (__oldval)
: "r" (&v->counter), "r" (old), "r" (new)
: "memory", "p0"
);
return __oldval;
}
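/*
 * Illustrative only (not part of this patch): the kind of generic
 * read-modify-write loop that gets built on top of atomic_cmpxchg(),
 * per the comment above.  The helper name is hypothetical.
 */
static inline int example_atomic_or_return(int mask, atomic_t *v)
{
	int old, val;

	do {
		old = atomic_read(v);
		val = old | mask;
	} while (atomic_cmpxchg(v, old, val) != old);

	return val;
}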
static inline int atomic_add_return(int i, atomic_t *v)
{
int output;
__asm__ __volatile__ (
"1: %0 = memw_locked(%1);\n"
" %0 = add(%0,%2);\n"
" memw_locked(%1,P3)=%0;\n"
" if !P3 jump 1b;\n"
: "=&r" (output)
: "r" (&v->counter), "r" (i)
: "memory", "p3"
);
return output;
}
#define atomic_add(i, v) atomic_add_return(i, (v))
static inline int atomic_sub_return(int i, atomic_t *v)
{
int output;
__asm__ __volatile__ (
"1: %0 = memw_locked(%1);\n"
" %0 = sub(%0,%2);\n"
" memw_locked(%1,P3)=%0\n"
" if !P3 jump 1b;\n"
: "=&r" (output)
: "r" (&v->counter), "r" (i)
: "memory", "p3"
);
return output;
}
#define atomic_sub(i, v) atomic_sub_return(i, (v))
/**
 * __atomic_add_unless - add unless the number is a given value
* @v: pointer to value
* @a: amount to add
* @u: unless value is equal to u
*
* Returns 1 if the add happened, 0 if it didn't.
*/
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
int output, __oldval;
asm volatile(
"1: %0 = memw_locked(%2);"
" {"
" p3 = cmp.eq(%0, %4);"
" if (p3.new) jump:nt 2f;"
" %0 = add(%0, %3);"
" %1 = #0;"
" }"
" memw_locked(%2, p3) = %0;"
" {"
" if !p3 jump 1b;"
" %1 = #1;"
" }"
"2:"
: "=&r" (__oldval), "=&r" (output)
: "r" (v), "r" (a), "r" (u)
: "memory", "p3"
);
return output;
}
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
#define atomic_inc(v) atomic_add(1, (v))
#define atomic_dec(v) atomic_sub(1, (v))
#define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0)
#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, (v)) == 0)
#define atomic_add_negative(i, v) (atomic_add_return(i, (v)) < 0)
#define atomic_inc_return(v) (atomic_add_return(1, v))
#define atomic_dec_return(v) (atomic_sub_return(1, v))
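/*
 * Illustrative only (not part of this patch): typical use of the
 * derived operations above, here a refcount-style release.  The
 * helper name is hypothetical.
 */
static inline void example_put(atomic_t *count, void (*release)(void))
{
	/* atomic_dec_and_test() returns true when the count hits zero */
	if (atomic_dec_and_test(count))
		release();
}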
#endif
/*
* Bit operations for the Hexagon architecture
*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H
#include <linux/compiler.h>
#include <asm/byteorder.h>
#include <asm/system.h>
#include <asm/atomic.h>
#ifdef __KERNEL__
#define smp_mb__before_clear_bit() barrier()
#define smp_mb__after_clear_bit() barrier()
/*
* The offset calculations for these are based on BITS_PER_LONG == 32
* (i.e. I get to shift by #5-2 (32 bits per long, 4 bytes per access),
* mask by 0x0000001F)
*
* Typically, R10 is clobbered for address, R11 bit nr, and R12 is temp
*/
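/*
 * Illustrative only (not part of this patch): the C equivalent of the
 * word/bit decomposition the assembly below performs with asr/asl/and.
 * The helper name is hypothetical and nothing here uses it.
 */
static inline volatile unsigned long *example_bit_word(volatile void *addr,
						       int nr)
{
	/* asr(nr,#5) picks the word; asl(...,#2) scales it back to bytes */
	return (volatile unsigned long *)addr + (nr >> 5);
}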
/**
* test_and_clear_bit - clear a bit and return its old value
* @nr: bit number to clear
* @addr: pointer to memory
*/
static inline int test_and_clear_bit(int nr, volatile void *addr)
{
int oldval;
__asm__ __volatile__ (
" {R10 = %1; R11 = asr(%2,#5); }\n"
" {R10 += asl(R11,#2); R11 = and(%2,#0x1f)}\n"
"1: R12 = memw_locked(R10);\n"
" { P0 = tstbit(R12,R11); R12 = clrbit(R12,R11); }\n"
" memw_locked(R10,P1) = R12;\n"
" {if !P1 jump 1b; %0 = mux(P0,#1,#0);}\n"
: "=&r" (oldval)
: "r" (addr), "r" (nr)
: "r10", "r11", "r12", "p0", "p1", "memory"
);
return oldval;
}
/**
* test_and_set_bit - set a bit and return its old value
* @nr: bit number to set
* @addr: pointer to memory
*/
static inline int test_and_set_bit(int nr, volatile void *addr)
{
int oldval;
__asm__ __volatile__ (
" {R10 = %1; R11 = asr(%2,#5); }\n"
" {R10 += asl(R11,#2); R11 = and(%2,#0x1f)}\n"
"1: R12 = memw_locked(R10);\n"
" { P0 = tstbit(R12,R11); R12 = setbit(R12,R11); }\n"
" memw_locked(R10,P1) = R12;\n"
" {if !P1 jump 1b; %0 = mux(P0,#1,#0);}\n"
: "=&r" (oldval)
: "r" (addr), "r" (nr)
: "r10", "r11", "r12", "p0", "p1", "memory"
);
return oldval;
}
/**
* test_and_change_bit - toggle a bit and return its old value
* @nr: bit number to set
* @addr: pointer to memory
*/
static inline int test_and_change_bit(int nr, volatile void *addr)
{
int oldval;
__asm__ __volatile__ (
" {R10 = %1; R11 = asr(%2,#5); }\n"
" {R10 += asl(R11,#2); R11 = and(%2,#0x1f)}\n"
"1: R12 = memw_locked(R10);\n"
" { P0 = tstbit(R12,R11); R12 = togglebit(R12,R11); }\n"
" memw_locked(R10,P1) = R12;\n"
" {if !P1 jump 1b; %0 = mux(P0,#1,#0);}\n"
: "=&r" (oldval)
: "r" (addr), "r" (nr)
: "r10", "r11", "r12", "p0", "p1", "memory"
);
return oldval;
}
/*
* Atomic, but doesn't care about the return value.
* Rewrite later to save a cycle or two.
*/
static inline void clear_bit(int nr, volatile void *addr)
{
test_and_clear_bit(nr, addr);
}
static inline void set_bit(int nr, volatile void *addr)
{
test_and_set_bit(nr, addr);
}
static inline void change_bit(int nr, volatile void *addr)
{
test_and_change_bit(nr, addr);
}
/*
* These are allowed to be non-atomic. In fact the generic flavors are
* in non-atomic.h. Would it be better to use intrinsics for this?
*
* OK, writes in our architecture do not invalidate LL/SC, so this has to
* be atomic, particularly for things like slab_lock and slab_unlock.
*
*/
static inline void __clear_bit(int nr, volatile unsigned long *addr)
{
test_and_clear_bit(nr, addr);
}
static inline void __set_bit(int nr, volatile unsigned long *addr)
{
test_and_set_bit(nr, addr);
}
static inline void __change_bit(int nr, volatile unsigned long *addr)
{
test_and_change_bit(nr, addr);
}
/* Apparently, at least some of these are allowed to be non-atomic */
static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
{
return test_and_clear_bit(nr, addr);
}
static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
{
return test_and_set_bit(nr, addr);
}
static inline int __test_and_change_bit(int nr, volatile unsigned long *addr)
{
return test_and_change_bit(nr, addr);
}
static inline int __test_bit(int nr, const volatile unsigned long *addr)
{
int retval;
asm volatile(
"{P0 = tstbit(%1,%2); if (P0.new) %0 = #1; if (!P0.new) %0 = #0;}\n"
: "=&r" (retval)
: "r" (addr[BIT_WORD(nr)]), "r" (nr % BITS_PER_LONG)
: "p0"
);
return retval;
}
#define test_bit(nr, addr) __test_bit(nr, addr)
/*
* ffz - find first zero in word.
 * @x: the word to search
*
* Undefined if no zero exists, so code should check against ~0UL first.
*/
static inline long ffz(int x)
{
int r;
asm("%0 = ct1(%1);\n"
: "=&r" (r)
: "r" (x));
return r;
}
/*
* fls - find last (most-significant) bit set
* @x: the word to search
*
* This is defined the same way as ffs.
* Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
*/
static inline long fls(int x)
{
int r;
asm("{ %0 = cl0(%1);}\n"
"%0 = sub(#32,%0);\n"
: "=&r" (r)
: "r" (x)
: "p0");
return r;
}
/*
* ffs - find first bit set
* @x: the word to search
*
* This is defined the same way as
* the libc and compiler builtin ffs routines, therefore
* differs in spirit from the above ffz (man ffs).
*/
static inline long ffs(int x)
{
int r;
asm("{ P0 = cmp.eq(%1,#0); %0 = ct0(%1);}\n"
"{ if P0 %0 = #0; if !P0 %0 = add(%0,#1);}\n"
: "=&r" (r)
: "r" (x)
: "p0");
return r;
}
/*
* __ffs - find first bit in word.
* @word: The word to search
*
* Undefined if no bit exists, so code should check against 0 first.
*
* bits_per_long assumed to be 32
 * numbering starts at 0 (instead of 1 like ffs)
*/
static inline unsigned long __ffs(unsigned long word)
{
int num;
asm("%0 = ct0(%1);\n"
: "=&r" (num)
: "r" (word));
return num;
}
/*
* __fls - find last (most-significant) set bit in a long word
* @word: the word to search
*
* Undefined if no set bit exists, so code should check against 0 first.
* bits_per_long assumed to be 32
*/
static inline unsigned long __fls(unsigned long word)
{
int num;
asm("%0 = cl0(%1);\n"
"%0 = sub(#31,%0);\n"
: "=&r" (num)
: "r" (word));
return num;
}
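/*
 * Worked example (illustrative): the differing numbering conventions
 * of the routines above, for x = 0x8 (bit 3 set):
 *
 *	ffs(x) == 4, fls(x) == 4	(1-based; 0 in yields 0)
 *	__ffs(x) == 3, __fls(x) == 3	(0-based; undefined for 0)
 *	ffz(~x) == 3			(first zero bit, 0-based)
 */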
#include <asm-generic/bitops/lock.h>
#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic.h>
#endif /* __KERNEL__ */
#endif
/*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#ifndef __ASM_HEXAGON_BITSPERLONG_H
#define __ASM_HEXAGON_BITSPERLONG_H
#define __BITS_PER_LONG 32
#include <asm-generic/bitsperlong.h>
#endif
/*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#ifndef _ASM_BYTEORDER_H
#define _ASM_BYTEORDER_H
#if defined(__GNUC__) && !defined(__STRICT_ANSI__) || defined(__KERNEL__)
# define __BYTEORDER_HAS_U64__
#endif
#include <linux/byteorder/little_endian.h>
#endif /* _ASM_BYTEORDER_H */
/*
* Cache definitions for the Hexagon architecture
*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#ifndef __ASM_CACHE_H
#define __ASM_CACHE_H
/* Bytes per L1 cache line */
#define L1_CACHE_SHIFT (5)
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
#define __cacheline_aligned __aligned(L1_CACHE_BYTES)
#define ____cacheline_aligned __aligned(L1_CACHE_BYTES)
/* See http://kerneltrap.org/node/15100 */
#define __read_mostly
#endif
/*
* Cache flush operations for the Hexagon architecture
*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#ifndef _ASM_CACHEFLUSH_H
#define _ASM_CACHEFLUSH_H
#include <linux/cache.h>
#include <linux/mm.h>
#include <asm/string.h>
#include <asm-generic/cacheflush.h>
/* Cache flushing:
*
* - flush_cache_all() flushes entire cache
* - flush_cache_mm(mm) flushes the specified mm context's cache lines
* - flush_cache_page(mm, vmaddr, pfn) flushes a single page
* - flush_cache_range(vma, start, end) flushes a range of pages
 * - flush_icache_range(start, end) flushes a range of instructions
 * - flush_dcache_page(pg) flushes (writes back & invalidates) a page for dcache
 * - flush_icache_page(vma, pg) flushes (invalidates) a page for icache
 *
 * Need to double-check which one is really needed for ptrace stuff to work.
*/
#define LINESIZE 32
#define LINEBITS 5
/*
* Flush Dcache range through current map.
*/
extern void flush_dcache_range(unsigned long start, unsigned long end);
/*
* Flush Icache range through current map.
*/
#undef flush_icache_range
extern void flush_icache_range(unsigned long start, unsigned long end);
/*
* Memory-management related flushes are there to ensure in non-physically
* indexed cache schemes that stale lines belonging to a given ASID aren't
* in the cache to confuse things. The prototype Hexagon Virtual Machine
* only uses a single ASID for all user-mode maps, which should
* mean that they aren't necessary. A brute-force, flush-everything
* implementation, with the name xxxxx_hexagon() is present in
* arch/hexagon/mm/cache.c, but let's not wire it up until we know
* it is needed.
*/
extern void flush_cache_all_hexagon(void);
/*
* This may or may not ever have to be non-null, depending on the
 * virtual machine MMU.  For a native kernel, it's definitely a no-op.
*
* This is also the place where deferred cache coherency stuff seems
* to happen, classically... but instead we do it like ia64 and
* clean the cache when the PTE is set.
*
*/
static inline void update_mmu_cache(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep)
{
/* generic_ptrace_pokedata doesn't wind up here, does it? */
}
#undef copy_to_user_page
static inline void copy_to_user_page(struct vm_area_struct *vma,
struct page *page,
unsigned long vaddr,
void *dst, void *src, int len)
{
memcpy(dst, src, len);
if (vma->vm_flags & VM_EXEC) {
flush_icache_range((unsigned long) dst,
(unsigned long) dst + len);
}
}
extern void hexagon_inv_dcache_range(unsigned long start, unsigned long end);
extern void hexagon_clean_dcache_range(unsigned long start, unsigned long end);
#endif
/*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#ifndef _ASM_CHECKSUM_H
#define _ASM_CHECKSUM_H
#define do_csum do_csum
unsigned int do_csum(const void *voidptr, int len);
/*
* the same as csum_partial, but copies from src while it
* checksums
*
* here even more important to align src and dst on a 32-bit (or even
* better 64-bit) boundary
*/
#define csum_partial_copy_nocheck csum_partial_copy_nocheck
__wsum csum_partial_copy_nocheck(const void *src, void *dst,
int len, __wsum sum);
/*
* computes the checksum of the TCP/UDP pseudo-header
* returns a 16-bit checksum, already complemented
*/
#define csum_tcpudp_nofold csum_tcpudp_nofold
__wsum csum_tcpudp_nofold(unsigned long saddr, unsigned long daddr,
unsigned short len, unsigned short proto, __wsum sum);
#define csum_tcpudp_magic csum_tcpudp_magic
__sum16 csum_tcpudp_magic(unsigned long saddr, unsigned long daddr,
unsigned short len, unsigned short proto, __wsum sum);
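/*
 * Illustrative only (not part of this patch): as in the generic
 * checksum code, the complemented pseudo-header checksum is the folded
 * nofold result, roughly:
 *
 *	csum_tcpudp_magic(s, d, len, proto, sum) ==
 *		csum_fold(csum_tcpudp_nofold(s, d, len, proto, sum))
 */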
#include <asm-generic/checksum.h>
#endif
/*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#ifndef _ASM_DELAY_H
#define _ASM_DELAY_H
#include <asm/param.h>
extern void __udelay(unsigned long usecs);
#define udelay(usecs) __udelay((usecs))
#endif /* _ASM_DELAY_H */
/*
* DMA operations for the Hexagon architecture
*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#ifndef _ASM_DMA_MAPPING_H
#define _ASM_DMA_MAPPING_H
#include <linux/types.h>
#include <linux/cache.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>
#include <linux/dma-attrs.h>
#include <asm/io.h>
struct device;
extern int bad_dma_address;
extern struct dma_map_ops *dma_ops;
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
if (unlikely(dev == NULL))
return NULL;
return dma_ops;
}
extern int dma_supported(struct device *dev, u64 mask);
extern int dma_set_mask(struct device *dev, u64 mask);
extern int dma_is_consistent(struct device *dev, dma_addr_t dma_handle);
extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction direction);
#include <asm-generic/dma-mapping-common.h>
static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
if (!dev->dma_mask)
return 0;
return addr + size - 1 <= *dev->dma_mask;
}
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	if (ops->mapping_error)
		return ops->mapping_error(dev, dma_addr);
return (dma_addr == bad_dma_address);
}
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag)
{
void *ret;
struct dma_map_ops *ops = get_dma_ops(dev);
	BUG_ON(!ops);
ret = ops->alloc_coherent(dev, size, dma_handle, flag);
debug_dma_alloc_coherent(dev, size, *dma_handle, ret);
return ret;
}
static inline void dma_free_coherent(struct device *dev, size_t size,
void *cpu_addr, dma_addr_t dma_handle)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	BUG_ON(!ops);
	ops->free_coherent(dev, size, cpu_addr, dma_handle);
debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
}
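/*
 * Illustrative driver-side usage (not part of this patch); the helper
 * name is hypothetical.
 */
static inline void *example_alloc_ring(struct device *dev, dma_addr_t *dma)
{
	/* One page, coherently visible to both CPU and device. */
	return dma_alloc_coherent(dev, PAGE_SIZE, dma, GFP_KERNEL);
}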
#endif
/*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#ifndef _ASM_DMA_H
#define _ASM_DMA_H
#include <asm/io.h>
#define MAX_DMA_CHANNELS 1
#define MAX_DMA_ADDRESS (PAGE_OFFSET)
extern size_t hexagon_coherent_pool_size;
#endif
/*
* ELF definitions for the Hexagon architecture
*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#ifndef __ASM_ELF_H
#define __ASM_ELF_H
#include <asm/ptrace.h>
#include <asm/user.h>
/*
* This should really be in linux/elf-em.h.
*/
#define EM_HEXAGON 164 /* QUALCOMM Hexagon */
struct elf32_hdr;
/*
* ELF header e_flags defines.
*/
/* should have stuff like "CPU type" and maybe "ABI version", etc */
/* Hexagon relocations */
/* V2 */
#define R_HEXAGON_NONE 0
#define R_HEXAGON_B22_PCREL 1
#define R_HEXAGON_B15_PCREL 2
#define R_HEXAGON_B7_PCREL 3
#define R_HEXAGON_LO16 4
#define R_HEXAGON_HI16 5
#define R_HEXAGON_32 6
#define R_HEXAGON_16 7
#define R_HEXAGON_8 8
#define R_HEXAGON_GPREL16_0 9
#define R_HEXAGON_GPREL16_1 10
#define R_HEXAGON_GPREL16_2 11
#define R_HEXAGON_GPREL16_3 12
#define R_HEXAGON_HL16 13
/* V3 */
#define R_HEXAGON_B13_PCREL 14
/* V4 */
#define R_HEXAGON_B9_PCREL 15
/* V4 (extenders) */
#define R_HEXAGON_B32_PCREL_X 16
#define R_HEXAGON_32_6_X 17
/* V4 (extended) */
#define R_HEXAGON_B22_PCREL_X 18
#define R_HEXAGON_B15_PCREL_X 19
#define R_HEXAGON_B13_PCREL_X 20
#define R_HEXAGON_B9_PCREL_X 21
#define R_HEXAGON_B7_PCREL_X 22
#define R_HEXAGON_16_X 23
#define R_HEXAGON_12_X 24
#define R_HEXAGON_11_X 25
#define R_HEXAGON_10_X 26
#define R_HEXAGON_9_X 27
#define R_HEXAGON_8_X 28
#define R_HEXAGON_7_X 29
#define R_HEXAGON_6_X 30
/* V2 PIC */
#define R_HEXAGON_32_PCREL 31
#define R_HEXAGON_COPY 32
#define R_HEXAGON_GLOB_DAT 33
#define R_HEXAGON_JMP_SLOT 34
#define R_HEXAGON_RELATIVE 35
#define R_HEXAGON_PLT_B22_PCREL 36
#define R_HEXAGON_GOTOFF_LO16 37
#define R_HEXAGON_GOTOFF_HI16 38
#define R_HEXAGON_GOTOFF_32 39
#define R_HEXAGON_GOT_LO16 40
#define R_HEXAGON_GOT_HI16 41
#define R_HEXAGON_GOT_32 42
#define R_HEXAGON_GOT_16 43
/*
 * ELF register definitions.
*/
typedef unsigned long elf_greg_t;
typedef struct user_regs_struct elf_gregset_t;
#define ELF_NGREG (sizeof(elf_gregset_t)/sizeof(unsigned long))
/* Placeholder */
typedef unsigned long elf_fpregset_t;
/*
* Bypass the whole "regsets" thing for now and use the define.
*/
#define ELF_CORE_COPY_REGS(DEST, REGS) \
do { \
DEST.r0 = REGS->r00; \
DEST.r1 = REGS->r01; \
DEST.r2 = REGS->r02; \
DEST.r3 = REGS->r03; \
DEST.r4 = REGS->r04; \
DEST.r5 = REGS->r05; \
DEST.r6 = REGS->r06; \
DEST.r7 = REGS->r07; \
DEST.r8 = REGS->r08; \
DEST.r9 = REGS->r09; \
DEST.r10 = REGS->r10; \
DEST.r11 = REGS->r11; \
DEST.r12 = REGS->r12; \
DEST.r13 = REGS->r13; \
DEST.r14 = REGS->r14; \
DEST.r15 = REGS->r15; \
DEST.r16 = REGS->r16; \
DEST.r17 = REGS->r17; \
DEST.r18 = REGS->r18; \
DEST.r19 = REGS->r19; \
DEST.r20 = REGS->r20; \
DEST.r21 = REGS->r21; \
DEST.r22 = REGS->r22; \
DEST.r23 = REGS->r23; \
DEST.r24 = REGS->r24; \
DEST.r25 = REGS->r25; \
DEST.r26 = REGS->r26; \
DEST.r27 = REGS->r27; \
DEST.r28 = REGS->r28; \
DEST.r29 = pt_psp(REGS); \
DEST.r30 = REGS->r30; \
DEST.r31 = REGS->r31; \
DEST.sa0 = REGS->sa0; \
DEST.lc0 = REGS->lc0; \
DEST.sa1 = REGS->sa1; \
DEST.lc1 = REGS->lc1; \
DEST.m0 = REGS->m0; \
DEST.m1 = REGS->m1; \
DEST.usr = REGS->usr; \
DEST.p3_0 = REGS->preds; \
DEST.gp = REGS->gp; \
DEST.ugp = REGS->ugp; \
DEST.pc = pt_elr(REGS); \
DEST.cause = pt_cause(REGS); \
DEST.badva = pt_badva(REGS); \
	} while (0)
/*
* This is used to ensure we don't load something for the wrong architecture.
 * Checks the machine type.
*/
#define elf_check_arch(hdr) ((hdr)->e_machine == EM_HEXAGON)
/*
* These are used to set parameters in the core dumps.
*/
#define ELF_CLASS ELFCLASS32
#define ELF_DATA ELFDATA2LSB
#define ELF_ARCH EM_HEXAGON
#ifdef CONFIG_HEXAGON_ARCH_V2
#define ELF_CORE_EFLAGS 0x1
#endif
#ifdef CONFIG_HEXAGON_ARCH_V3
#define ELF_CORE_EFLAGS 0x2
#endif
#ifdef CONFIG_HEXAGON_ARCH_V4
#define ELF_CORE_EFLAGS 0x3
#endif
/*
* Some architectures have ld.so set up a pointer to a function
* to be registered using atexit, to facilitate cleanup. So that
* static executables will be well-behaved, we would null the register
* in question here, in the pt_regs structure passed. For now,
* leave it a null macro.
*/
#define ELF_PLAT_INIT(regs, load_addr) do { } while (0)
#define USE_ELF_CORE_DUMP
#define CORE_DUMP_USE_REGSET
/* Hrm is this going to cause problems for changing PAGE_SIZE? */
#define ELF_EXEC_PAGESIZE 4096
/*
* This is the location that an ET_DYN program is loaded if exec'ed. Typical
* use of this is to invoke "./ld.so someprog" to test out a new version of
* the loader. We need to make sure that it is out of the way of the program
* that it will "exec", and that there is sufficient room for the brk.
*/
#define ELF_ET_DYN_BASE 0x08000000UL
/*
* This yields a mask that user programs can use to figure out what
* instruction set this cpu supports.
*/
#define ELF_HWCAP (0)
/*
* This yields a string that ld.so will use to load implementation
* specific libraries for optimization. This is more specific in
* intent than poking at uname or /proc/cpuinfo.
*/
#define ELF_PLATFORM (NULL)
#ifdef __KERNEL__
#define SET_PERSONALITY(ex) set_personality(PER_LINUX)
#endif
#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
struct linux_binprm;
extern int arch_setup_additional_pages(struct linux_binprm *bprm,
int uses_interp);
#endif
/*
* Fixmap support for Hexagon - enough to support highmem features
*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#ifndef _ASM_FIXMAP_H
#define _ASM_FIXMAP_H
/*
* A lot of the fixmap info is already in mem-layout.h
*/
#include <asm/mem-layout.h>
/*
* Full fixmap support involves set_fixmap() functions, but
* these may not be needed if all we're after is an area for
* highmem kernel mappings.
*/
#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT))
#define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT)
extern void __this_fixmap_does_not_exist(void);
/**
* fix_to_virt -- "index to address" translation.
*
* If anyone tries to use the idx directly without translation,
 * we catch the bug with a NULL-dereference kernel oops.  Illegal
* ranges of incoming indices are caught too.
*/
static inline unsigned long fix_to_virt(const unsigned int idx)
{
/*
* This branch gets completely eliminated after inlining,
* except when someone tries to use fixaddr indices in an
* illegal way. (such as mixing up address types or using
* out-of-range indices).
*
* If it doesn't get removed, the linker will complain
 * loudly with a reasonably clear error message.
*/
if (idx >= __end_of_fixed_addresses)
__this_fixmap_does_not_exist();
return __fix_to_virt(idx);
}
static inline unsigned long virt_to_fix(const unsigned long vaddr)
{
BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
return __virt_to_fix(vaddr);
}
#define kmap_get_fixmap_pte(vaddr) \
pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), \
(vaddr)), (vaddr)), (vaddr))
#endif
/*
* If the FPU is used inside the kernel,
* kernel_fpu_end() will be defined here.
*/
#ifndef _ASM_HEXAGON_FUTEX_H
#define _ASM_HEXAGON_FUTEX_H
#ifdef __KERNEL__
#include <linux/futex.h>
#include <linux/uaccess.h>
#include <asm/errno.h>
/* XXX TODO-- need to add sync barriers! */
#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
__asm__ __volatile( \
"1: %0 = memw_locked(%3);\n" \
/* For example: %1 = %4 */ \
insn \
"2: memw_locked(%3,p2) = %1;\n" \
" if !p2 jump 1b;\n" \
" %1 = #0;\n" \
"3:\n" \
".section .fixup,\"ax\"\n" \
"4: %1 = #%5;\n" \
" jump 3b\n" \
".previous\n" \
".section __ex_table,\"a\"\n" \
".long 1b,4b,2b,4b\n" \
".previous\n" \
: "=&r" (oldval), "=&r" (ret), "+m" (*uaddr) \
: "r" (uaddr), "r" (oparg), "i" (-EFAULT) \
: "p2", "memory")
static inline int
futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
{
int op = (encoded_op >> 28) & 7;
int cmp = (encoded_op >> 24) & 15;
int oparg = (encoded_op << 8) >> 20;
int cmparg = (encoded_op << 20) >> 20;
int oldval = 0, ret;
if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
oparg = 1 << oparg;
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
return -EFAULT;
pagefault_disable();
switch (op) {
case FUTEX_OP_SET:
__futex_atomic_op("%1 = %4\n", ret, oldval, uaddr, oparg);
break;
case FUTEX_OP_ADD:
__futex_atomic_op("%1 = add(%0,%4)\n", ret, oldval, uaddr,
oparg);
break;
case FUTEX_OP_OR:
__futex_atomic_op("%1 = or(%0,%4)\n", ret, oldval, uaddr,
oparg);
break;
case FUTEX_OP_ANDN:
__futex_atomic_op("%1 = not(%4); %1 = and(%0,%1)\n", ret,
oldval, uaddr, oparg);
break;
case FUTEX_OP_XOR:
__futex_atomic_op("%1 = xor(%0,%4)\n", ret, oldval, uaddr,
oparg);
break;
default:
ret = -ENOSYS;
}
pagefault_enable();
if (!ret) {
switch (cmp) {
case FUTEX_OP_CMP_EQ:
ret = (oldval == cmparg);
break;
case FUTEX_OP_CMP_NE:
ret = (oldval != cmparg);
break;
case FUTEX_OP_CMP_LT:
ret = (oldval < cmparg);
break;
case FUTEX_OP_CMP_GE:
ret = (oldval >= cmparg);
break;
case FUTEX_OP_CMP_LE:
ret = (oldval <= cmparg);
break;
case FUTEX_OP_CMP_GT:
ret = (oldval > cmparg);
break;
default:
ret = -ENOSYS;
}
}
return ret;
}
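/*
 * Worked example (illustrative, not part of this patch): for
 * encoded_op = FUTEX_OP(FUTEX_OP_ADD, 1, FUTEX_OP_CMP_EQ, 0), the
 * decode above yields op = FUTEX_OP_ADD, cmp = FUTEX_OP_CMP_EQ,
 * oparg = 1, cmparg = 0; the user word is atomically incremented and
 * the function returns (oldval == 0).
 */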
static inline int
futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval,
u32 newval)
{
	int prev;
	int ret = 0;
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
return -EFAULT;
__asm__ __volatile__ (
"1: %1 = memw_locked(%3)\n"
" {\n"
" p2 = cmp.eq(%1,%4)\n"
" if !p2.new jump:NT 3f\n"
" }\n"
"2: memw_locked(%3,p2) = %5\n"
" if !p2 jump 1b\n"
"3:\n"
".section .fixup,\"ax\"\n"
"4: %0 = #%6\n"
" jump 3b\n"
".previous\n"
".section __ex_table,\"a\"\n"
".long 1b,4b,2b,4b\n"
".previous\n"
: "+r" (ret), "=&r" (prev), "+m" (*uaddr)
: "r" (uaddr), "r" (oldval), "r" (newval), "i"(-EFAULT)
: "p2", "memory");
*uval = prev;
return ret;
}
#endif /* __KERNEL__ */
#endif /* _ASM_HEXAGON_FUTEX_H */
/*
 * Declarations for the Hexagon Virtual Machine.
*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#ifndef ASM_HEXAGON_VM_H
#define ASM_HEXAGON_VM_H
/*
* In principle, a Linux kernel for the VM could
* selectively define the virtual instructions
* as inline assembler macros, but for a first pass,
* we'll use subroutines for both the VM and the native
* kernels. It's costing a subroutine call/return,
* but it makes for a single set of entry points
* for tracing/debugging.
*/
/*
 * Let's make this stuff visible only if configured,
* so we can unconditionally include the file.
*/
#ifndef __ASSEMBLY__
enum VM_CACHE_OPS {
ickill,
dckill,
l2kill,
dccleaninva,
icinva,
idsync,
fetch_cfg
};
enum VM_INT_OPS {
nop,
globen,
globdis,
locen,
locdis,
affinity,
get,
peek,
status,
post,
clear
};
extern void _K_VM_event_vector(void);
void __vmrte(void);
long __vmsetvec(void *);
long __vmsetie(long);
long __vmgetie(void);
long __vmintop(enum VM_INT_OPS, long, long, long, long);
long __vmclrmap(void *, unsigned long);
long __vmnewmap(void *);
long __vmcache(enum VM_CACHE_OPS op, unsigned long addr, unsigned long len);
unsigned long long __vmgettime(void);
long __vmsettime(unsigned long long);
long __vmstart(void *, void *);
void __vmstop(void);
long __vmwait(void);
void __vmyield(void);
long __vmvpid(void);
static inline long __vmcache_ickill(void)
{
return __vmcache(ickill, 0, 0);
}
static inline long __vmcache_dckill(void)
{
return __vmcache(dckill, 0, 0);
}
static inline long __vmcache_l2kill(void)
{
return __vmcache(l2kill, 0, 0);
}
static inline long __vmcache_dccleaninva(unsigned long addr, unsigned long len)
{
return __vmcache(dccleaninva, addr, len);
}
static inline long __vmcache_icinva(unsigned long addr, unsigned long len)
{
return __vmcache(icinva, addr, len);
}
static inline long __vmcache_idsync(unsigned long addr,
unsigned long len)
{
return __vmcache(idsync, addr, len);
}
static inline long __vmcache_fetch_cfg(unsigned long val)
{
return __vmcache(fetch_cfg, val, 0);
}
/* interrupt operations */
static inline long __vmintop_nop(void)
{
return __vmintop(nop, 0, 0, 0, 0);
}
static inline long __vmintop_globen(long i)
{
return __vmintop(globen, i, 0, 0, 0);
}
static inline long __vmintop_globdis(long i)
{
return __vmintop(globdis, i, 0, 0, 0);
}
static inline long __vmintop_locen(long i)
{
return __vmintop(locen, i, 0, 0, 0);
}
static inline long __vmintop_locdis(long i)
{
return __vmintop(locdis, i, 0, 0, 0);
}
static inline long __vmintop_affinity(long i, long cpu)
{
	return __vmintop(affinity, i, cpu, 0, 0);
}
static inline long __vmintop_get(void)
{
return __vmintop(get, 0, 0, 0, 0);
}
static inline long __vmintop_peek(void)
{
return __vmintop(peek, 0, 0, 0, 0);
}
static inline long __vmintop_status(long i)
{
return __vmintop(status, i, 0, 0, 0);
}
static inline long __vmintop_post(long i)
{
return __vmintop(post, i, 0, 0, 0);
}
static inline long __vmintop_clear(long i)
{
return __vmintop(clear, i, 0, 0, 0);
}
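/*
 * Illustrative only (not part of this patch): masking and re-enabling
 * a single interrupt line through the wrappers above; the helper name
 * is hypothetical.
 */
static inline void example_cycle_irq(long irq)
{
	__vmintop_globdis(irq);		/* globally disable this interrupt */
	__vmintop_clear(irq);		/* clear any pending instance */
	__vmintop_globen(irq);		/* re-enable delivery */
}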
#else /* Only assembly code should reference these */
#define HVM_TRAP1_VMRTE 1
#define HVM_TRAP1_VMSETVEC 2
#define HVM_TRAP1_VMSETIE 3
#define HVM_TRAP1_VMGETIE 4
#define HVM_TRAP1_VMINTOP 5
#define HVM_TRAP1_VMCLRMAP 10
#define HVM_TRAP1_VMNEWMAP 11
#define HVM_TRAP1_FORMERLY_VMWIRE 12
#define HVM_TRAP1_VMCACHE 13
#define HVM_TRAP1_VMGETTIME 14
#define HVM_TRAP1_VMSETTIME 15
#define HVM_TRAP1_VMWAIT 16
#define HVM_TRAP1_VMYIELD 17
#define HVM_TRAP1_VMSTART 18
#define HVM_TRAP1_VMSTOP 19
#define HVM_TRAP1_VMVPID 20
#define HVM_TRAP1_VMSETREGS 21
#define HVM_TRAP1_VMGETREGS 22
#endif /* __ASSEMBLY__ */
/*
* Constants for virtual instruction parameters and return values
*/
/* vmsetie arguments */
#define VM_INT_DISABLE 0
#define VM_INT_ENABLE 1
/* vmsetimask arguments */
#define VM_INT_UNMASK 0
#define VM_INT_MASK 1
#define VM_NEWMAP_TYPE_LINEAR 0
#define VM_NEWMAP_TYPE_PGTABLES 1
/*
* Event Record definitions useful to both C and Assembler
*/
/* VMEST Layout */
#define HVM_VMEST_UM_SFT 31
#define HVM_VMEST_UM_MSK 1
#define HVM_VMEST_IE_SFT 30
#define HVM_VMEST_IE_MSK 1
#define HVM_VMEST_EVENTNUM_SFT 16
#define HVM_VMEST_EVENTNUM_MSK 0xff
#define HVM_VMEST_CAUSE_SFT 0
#define HVM_VMEST_CAUSE_MSK 0xffff
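/*
 * Worked example (illustrative): for vmest = 0x80020003 the fields
 * above give UM = 1, IE = 0, event number = 0x02 (HVM_EV_GENEX),
 * cause = 0x0003.
 */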
/*
* The initial program gets to find a system environment descriptor
 * on its stack when it begins execution.  The first word is a version
* code to indicate what is there. Zero means nothing more.
*/
#define HEXAGON_VM_SED_NULL 0
/*
* Event numbers for vector binding
*/
#define HVM_EV_RESET 0
#define HVM_EV_MACHCHECK 1
#define HVM_EV_GENEX 2
#define HVM_EV_TRAP 8
#define HVM_EV_INTR 15
/* These should be nuked as soon as we know the VM is up to spec v0.1.1 */
#define HVM_EV_INTR_0 16
#define HVM_MAX_INTR 240
/*
* Cause values for General Exception
*/
#define HVM_GE_C_BUS 0x01
#define HVM_GE_C_XPROT 0x11
#define HVM_GE_C_XUSER 0x14
#define HVM_GE_C_INVI 0x15
#define HVM_GE_C_PRIVI 0x1B
#define HVM_GE_C_XMAL 0x1C
#define HVM_GE_C_RMAL 0x20
#define HVM_GE_C_WMAL 0x21
#define HVM_GE_C_RPROT 0x22
#define HVM_GE_C_WPROT 0x23
#define HVM_GE_C_RUSER 0x24
#define HVM_GE_C_WUSER 0x25
#define HVM_GE_C_CACHE 0x28
/*
* Cause codes for Machine Check
*/
#define HVM_MCHK_C_DOWN 0x00
#define HVM_MCHK_C_BADSP 0x01
#define HVM_MCHK_C_BADEX 0x02
#define HVM_MCHK_C_BADPT 0x03
#define HVM_MCHK_C_REGWR 0x29
#endif
/*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#ifndef _ASM_HEXAGON_INTRINSICS_H
#define _ASM_HEXAGON_INTRINSICS_H
#define HEXAGON_P_vrmpyhacc_PP __builtin_HEXAGON_M2_vrmac_s0
#define HEXAGON_P_vrmpyh_PP __builtin_HEXAGON_M2_vrmpy_s0
#define HEXAGON_R_cl0_R __builtin_HEXAGON_S2_cl0
#endif
/*
* IO definitions for the Hexagon architecture
*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#ifndef _ASM_IO_H
#define _ASM_IO_H
#ifdef __KERNEL__
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/vmalloc.h>
#include <asm/string.h>
#include <asm/mem-layout.h>
#include <asm/iomap.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
/*
* We don't have PCI yet.
* _IO_BASE is pointing at what should be unused virtual space.
*/
#define IO_SPACE_LIMIT 0xffff
#define _IO_BASE ((void __iomem *)0xfe000000)
extern int remap_area_pages(unsigned long start, unsigned long phys_addr,
unsigned long end, unsigned long flags);
extern void __iounmap(const volatile void __iomem *addr);
/* Defined in lib/io.c, needed for smc91x driver. */
extern void __raw_readsw(const void __iomem *addr, void *data, int wordlen);
extern void __raw_writesw(void __iomem *addr, const void *data, int wordlen);
extern void __raw_readsl(const void __iomem *addr, void *data, int wordlen);
extern void __raw_writesl(void __iomem *addr, const void *data, int wordlen);
#define readsw(p, d, l) __raw_readsw(p, d, l)
#define writesw(p, d, l) __raw_writesw(p, d, l)
#define readsl(p, d, l) __raw_readsl(p, d, l)
#define writesl(p, d, l) __raw_writesl(p, d, l)
/*
* virt_to_phys - map virtual address to physical
* @address: address to map
*/
static inline unsigned long virt_to_phys(volatile void *address)
{
return __pa(address);
}
/*
* phys_to_virt - map physical address to virtual
* @address: address to map
*/
static inline void *phys_to_virt(unsigned long address)
{
return __va(address);
}
/*
* convert a physical pointer to a virtual kernel pointer for
* /dev/mem access.
*/
#define xlate_dev_kmem_ptr(p) __va(p)
#define xlate_dev_mem_ptr(p) __va(p)
/*
* IO port access primitives. Hexagon doesn't have special IO access
* instructions; all I/O is memory mapped.
*
* in/out are used for "ports", but we don't have "port instructions",
* so these are really just memory mapped too.
*/
/*
* readb - read byte from memory mapped device
* @addr: pointer to memory
*
* Operates on "I/O bus memory space"
*/
static inline u8 readb(const volatile void __iomem *addr)
{
u8 val;
asm volatile(
"%0 = memb(%1);"
: "=&r" (val)
: "r" (addr)
);
return val;
}
static inline u16 readw(const volatile void __iomem *addr)
{
u16 val;
asm volatile(
"%0 = memh(%1);"
: "=&r" (val)
: "r" (addr)
);
return val;
}
static inline u32 readl(const volatile void __iomem *addr)
{
u32 val;
asm volatile(
"%0 = memw(%1);"
: "=&r" (val)
: "r" (addr)
);
return val;
}
/*
* writeb - write a byte to a memory location
 * @data: data to write
* @addr: pointer to memory
*
*/
static inline void writeb(u8 data, volatile void __iomem *addr)
{
asm volatile(
"memb(%0) = %1;"
:
: "r" (addr), "r" (data)
: "memory"
);
}
static inline void writew(u16 data, volatile void __iomem *addr)
{
asm volatile(
"memh(%0) = %1;"
:
: "r" (addr), "r" (data)
: "memory"
);
}
static inline void writel(u32 data, volatile void __iomem *addr)
{
asm volatile(
"memw(%0) = %1;"
:
: "r" (addr), "r" (data)
: "memory"
);
}
#define __raw_writeb writeb
#define __raw_writew writew
#define __raw_writel writel
#define __raw_readb readb
#define __raw_readw readw
#define __raw_readl readl
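/*
 * Illustrative only (not part of this patch): typical MMIO access
 * through the accessors above.  The register offsets and helper name
 * are hypothetical.
 */
static inline u32 example_read_status(void __iomem *base)
{
	writel(0x1, base + 0x04);	/* hypothetical CTRL register */
	return readl(base + 0x08);	/* hypothetical STATUS register */
}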
/*
* Need an mtype somewhere in here, for cache type deals?
* This is probably too long for an inline.
*/
void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size);
static inline void __iomem *ioremap(unsigned long phys_addr, unsigned long size)
{
return ioremap_nocache(phys_addr, size);
}
static inline void iounmap(volatile void __iomem *addr)
{
__iounmap(addr);
}
static inline void memcpy_fromio(void *dst, const volatile void __iomem *src,
int count)
{
memcpy(dst, (void *) src, count);
}
static inline void memcpy_toio(volatile void __iomem *dst, const void *src,
int count)
{
memcpy((void *) dst, src, count);
}
#define PCI_IO_ADDR (volatile void __iomem *)
/*
 * inb - read a byte from an I/O port
* @port: address in I/O space
*
* Operates on "I/O bus I/O space"
*/
static inline u8 inb(unsigned long port)
{
return readb(_IO_BASE + (port & IO_SPACE_LIMIT));
}
static inline u16 inw(unsigned long port)
{
return readw(_IO_BASE + (port & IO_SPACE_LIMIT));
}
static inline u32 inl(unsigned long port)
{
return readl(_IO_BASE + (port & IO_SPACE_LIMIT));
}
/*
 * outb - write a byte to an I/O port
 * @data: data to write
 * @port: address in I/O space
 */
static inline void outb(u8 data, unsigned long port)
{
writeb(data, _IO_BASE + (port & IO_SPACE_LIMIT));
}
static inline void outw(u16 data, unsigned long port)
{
writew(data, _IO_BASE + (port & IO_SPACE_LIMIT));
}
static inline void outl(u32 data, unsigned long port)
{
writel(data, _IO_BASE + (port & IO_SPACE_LIMIT));
}
#define outb_p outb
#define outw_p outw
#define outl_p outl
#define inb_p inb
#define inw_p inw
#define inl_p inl
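/*
 * Illustrative sketch only: since port I/O is plain MMIO here, legacy
 * drivers can keep using inb()/outb() unchanged. The port numbers below
 * are hypothetical.
 */
#if 0
static u8 example_port_io(void)
{
	outb(0x80, 0x43);	/* write a command byte to a made-up port */
	return inb(0x40);	/* read a status byte from another one */
}
#endif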
static inline void insb(unsigned long port, void *buffer, int count)
{
if (count) {
u8 *buf = buffer;
do {
u8 x = inb(port);
*buf++ = x;
} while (--count);
}
}
static inline void insw(unsigned long port, void *buffer, int count)
{
if (count) {
u16 *buf = buffer;
do {
u16 x = inw(port);
*buf++ = x;
} while (--count);
}
}
static inline void insl(unsigned long port, void *buffer, int count)
{
if (count) {
u32 *buf = buffer;
do {
u32 x = inl(port);
*buf++ = x;
} while (--count);
}
}
static inline void outsb(unsigned long port, const void *buffer, int count)
{
if (count) {
const u8 *buf = buffer;
do {
outb(*buf++, port);
} while (--count);
}
}
static inline void outsw(unsigned long port, const void *buffer, int count)
{
if (count) {
const u16 *buf = buffer;
do {
outw(*buf++, port);
} while (--count);
}
}
static inline void outsl(unsigned long port, const void *buffer, int count)
{
if (count) {
const u32 *buf = buffer;
do {
outl(*buf++, port);
} while (--count);
}
}
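/*
 * Illustrative sketch only: the string variants just loop over the
 * single-word accessors on one port, as a device FIFO expects. The port
 * number and word count are hypothetical.
 */
#if 0
static void example_fifo_copy(u16 *rx, const u16 *tx, int words)
{
	outsw(0x300, tx, words);	/* push words out a made-up data port */
	insw(0x300, rx, words);		/* pull words back from the same port */
}
#endif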
#define flush_write_buffers() do { } while (0)
#endif /* __KERNEL__ */
#endif
/*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#ifndef _ASM_IRQ_H_
#define _ASM_IRQ_H_
/* Number of first-level interrupts associated with the CPU core. */
#define HEXAGON_CPUINTS 32
/*
* Must define NR_IRQS before including <asm-generic/irq.h>
* 64 == the two SIRCs, 176 == the two GPIOs
*
* IRQ configuration is still in flux; defining this to a comfortably
* large number.
*/
#define NR_IRQS 512
#include <asm-generic/irq.h>
#endif
/*
* IRQ support for the Hexagon architecture
*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#ifndef _ASM_IRQFLAGS_H
#define _ASM_IRQFLAGS_H
#include <asm/hexagon_vm.h>
#include <linux/types.h>
static inline unsigned long arch_local_save_flags(void)
{
return __vmgetie();
}
static inline unsigned long arch_local_irq_save(void)
{
return __vmsetie(VM_INT_DISABLE);
}
static inline bool arch_irqs_disabled_flags(unsigned long flags)
{
return !flags;
}
static inline bool arch_irqs_disabled(void)
{
return !__vmgetie();
}
static inline void arch_local_irq_enable(void)
{
__vmsetie(VM_INT_ENABLE);
}
static inline void arch_local_irq_disable(void)
{
__vmsetie(VM_INT_DISABLE);
}
static inline void arch_local_irq_restore(unsigned long flags)
{
__vmsetie(flags);
}
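/*
 * Illustrative sketch only: these primitives back the generic
 * local_irq_save()/local_irq_restore() pattern. The flags value is just
 * whatever __vmgetie() returned, so restoring is a single __vmsetie().
 */
#if 0
static void example_critical_section(void)
{
	unsigned long flags;

	flags = arch_local_irq_save();	/* VM interrupts now disabled */
	/* ... touch state that must not race with an interrupt ... */
	arch_local_irq_restore(flags);	/* back to the prior enable state */
}
#endif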
#endif
/*
* arch/hexagon/include/asm/kgdb.h - Hexagon KGDB Support
*
* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#ifndef __HEXAGON_KGDB_H__
#define __HEXAGON_KGDB_H__
#define BREAK_INSTR_SIZE 4
#define CACHE_FLUSH_IS_SAFE 1
#define BUFMAX ((NUMREGBYTES * 2) + 512)
static inline void arch_kgdb_breakpoint(void)
{
asm("trap0(#0xDB)");
}
/* Registers:
* 32 gpr + sa0/1 + lc0/1 + m0/1 + gp + ugp + pred + pc = 42 total.
* vm regs = psp+elr+est+badva = 4
* syscall+restart = 2 more
* so 48 = 42 + 4 + 2
*/
#define DBG_USER_REGS 42
#define DBG_MAX_REG_NUM (DBG_USER_REGS + 6)
#define NUMREGBYTES (DBG_MAX_REG_NUM*4)
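/*
 * Worked out (illustrative only): DBG_MAX_REG_NUM = 42 + 6 = 48,
 * NUMREGBYTES = 48 * 4 = 192, and BUFMAX = (192 * 2) + 512 = 896 bytes,
 * enough for a hex-encoded 'g' register packet plus protocol overhead.
 */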
#endif /* __HEXAGON_KGDB_H__ */
/*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#ifndef __ASM_LINKAGE_H
#define __ASM_LINKAGE_H
#define __ALIGN .align 4
#define __ALIGN_STR ".align 4"
#endif
/*
* Memory layout definitions for the Hexagon architecture
*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#ifndef _ASM_HEXAGON_MEM_LAYOUT_H
#define _ASM_HEXAGON_MEM_LAYOUT_H
#include <linux/const.h>
/*
* Have to do this for ginormous numbers, else they get printed as
* negative numbers, which the linker no likey when you try to
* assign it to the location counter.
*/
#define PAGE_OFFSET _AC(0xc0000000, UL)
/*
* LOAD_ADDRESS is the physical/linear address of where in memory
* the kernel gets loaded. The 12 least significant bits must be zero
* due to limitations on setting the EVB.
*/
#ifndef LOAD_ADDRESS
#define LOAD_ADDRESS 0x00000000
#endif
#define TASK_SIZE (PAGE_OFFSET)
/* not sure how these are used yet */
#define STACK_TOP TASK_SIZE
#define STACK_TOP_MAX TASK_SIZE
#ifndef __ASSEMBLY__
enum fixed_addresses {
FIX_KMAP_BEGIN,
FIX_KMAP_END, /* check for per-cpuism */
__end_of_fixed_addresses
};
#define MIN_KERNEL_SEG 0x300 /* From 0xc0000000 */
extern int max_kernel_seg;
/*
* Start of vmalloc virtual address space for kernel;
* supposed to be based on the amount of physical memory available
*/
#define VMALLOC_START (PAGE_OFFSET + VMALLOC_OFFSET + \
(unsigned long)high_memory)
/* Gap between physical ram and vmalloc space for guard purposes. */
#define VMALLOC_OFFSET PAGE_SIZE
/*
* Create the space between VMALLOC_START and FIXADDR_TOP backwards
* from the ... "top".
*
* Permanent IO mappings will live at 0xfexx_xxxx
* Hypervisor occupies the last 16MB page at 0xffxxxxxx
*/
#define FIXADDR_TOP 0xfe000000
#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
/*
* "permanent kernel mappings", defined as long-lasting mappings of
* high-memory page frames into the kernel address space.
*/
#define LAST_PKMAP PTRS_PER_PTE
#define LAST_PKMAP_MASK (LAST_PKMAP - 1)
#define PKMAP_NR(virt) ((virt - PKMAP_BASE) >> PAGE_SHIFT)
#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
/*
* To the "left" of the fixed map space is the kmap space
*
* "Permanent Kernel Mappings"; fancy (or less fancy) PTE table
* that looks like it's actually walked.
* Need to check the alignment/shift usage; some archs use
* PMD_MASK on this value
*/
#define PKMAP_BASE (FIXADDR_START-PAGE_SIZE*LAST_PKMAP)
/*
* 2 pages of guard gap between where vmalloc area ends
* and pkmap_base begins.
*/
#define VMALLOC_END (PKMAP_BASE-PAGE_SIZE*2)
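/*
 * Worked example (illustrative only), assuming 4KB pages and therefore
 * PTRS_PER_PTE == LAST_PKMAP == 1024:
 *
 * FIXADDR_SIZE  = 2 << 12              = 0x00002000
 * FIXADDR_START = 0xfe000000 - 0x2000  = 0xfdffe000
 * PKMAP_BASE    = FIXADDR_START - 4MB  = 0xfdbfe000
 * VMALLOC_END   = PKMAP_BASE - 2 pages = 0xfdbfc000
 */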
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_HEXAGON_MEM_LAYOUT_H */
/*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#ifndef _ASM_MMU_H
#define _ASM_MMU_H
#include <asm/vdso.h>
/*
* Architecture-specific state for a mm_struct.
* For the Hexagon Virtual Machine, it can be a copy
* of the pointer to the page table base.
*/
struct mm_context {
unsigned long long generation;
unsigned long ptbase;
struct hexagon_vdso *vdso;
};
typedef struct mm_context mm_context_t;
#endif
/*
* MM context support for the Hexagon architecture
*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#ifndef _ASM_MMU_CONTEXT_H
#define _ASM_MMU_CONTEXT_H
#include <asm/setup.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/mem-layout.h>
static inline void destroy_context(struct mm_struct *mm)
{
}
/*
* VM port hides all TLB management, so "lazy TLB" isn't very
* meaningful. Even for ports to architectures with visible TLBs,
* this is almost invariably a null function.
*/
static inline void enter_lazy_tlb(struct mm_struct *mm,
struct task_struct *tsk)
{
}
/*
* Architecture-specific actions, if any, for memory map deactivation.
*/
static inline void deactivate_mm(struct task_struct *tsk,
struct mm_struct *mm)
{
}
/**
* init_new_context - initialize context related info for new mm_struct instance
* @tsk: pointer to a task struct
* @mm: pointer to a new mm struct
*/
static inline int init_new_context(struct task_struct *tsk,
struct mm_struct *mm)
{
/* mm->context is set up by pgd_alloc */
return 0;
}
/*
* Switch active mm context
*/
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk)
{
int l1;
/*
* For virtual machine, we have to update system map if it's been
* touched.
*/
if (next->context.generation < prev->context.generation) {
for (l1 = MIN_KERNEL_SEG; l1 <= max_kernel_seg; l1++)
next->pgd[l1] = init_mm.pgd[l1];
next->context.generation = prev->context.generation;
}
__vmnewmap((void *)next->context.ptbase);
}
/*
* Activate new memory map for task
*/
static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
unsigned long flags;
local_irq_save(flags);
switch_mm(prev, next, current_thread_info()->task);
local_irq_restore(flags);
}
/* Generic hooks for arch_dup_mmap and arch_exit_mmap */
#include <asm-generic/mm_hooks.h>
#endif
/*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#ifndef _ASM_MODULE_H
#define _ASM_MODULE_H
#include <asm-generic/module.h>
#define MODULE_ARCH_VERMAGIC __stringify(PROCESSOR_MODEL_NAME) " "
#endif
/*
* Pull in the generic implementation for the mutex fastpath.
*
* TODO: implement optimized primitives instead, or leave the generic
* implementation in place, or pick the atomic_xchg() based generic
* implementation. (see asm-generic/mutex-xchg.h for details)
*/
#include <asm-generic/mutex-xchg.h>
/*
* Page management definitions for the Hexagon architecture
*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#ifndef _ASM_PAGE_H
#define _ASM_PAGE_H
#include <linux/const.h>
/* This is probably not the most graceful way to handle this. */
#ifdef CONFIG_PAGE_SIZE_4KB
#define PAGE_SHIFT 12
#define HEXAGON_L1_PTE_SIZE __HVM_PDE_S_4KB
#endif
#ifdef CONFIG_PAGE_SIZE_16KB
#define PAGE_SHIFT 14
#define HEXAGON_L1_PTE_SIZE __HVM_PDE_S_16KB
#endif
#ifdef CONFIG_PAGE_SIZE_64KB
#define PAGE_SHIFT 16
#define HEXAGON_L1_PTE_SIZE __HVM_PDE_S_64KB
#endif
#ifdef CONFIG_PAGE_SIZE_256KB
#define PAGE_SHIFT 18
#define HEXAGON_L1_PTE_SIZE __HVM_PDE_S_256KB
#endif
#ifdef CONFIG_PAGE_SIZE_1MB
#define PAGE_SHIFT 20
#define HEXAGON_L1_PTE_SIZE __HVM_PDE_S_1MB
#endif
/*
* These should be defined in hugetlb.h, but apparently not.
* "Huge" for us should be 4MB or 16MB, which are both represented
* in L1 PTEs. Right now, it's set up for 4MB.
*/
#ifdef CONFIG_HUGETLB_PAGE
#define HPAGE_SHIFT 22
#define HPAGE_SIZE (1UL << HPAGE_SHIFT)
#define HPAGE_MASK (~(HPAGE_SIZE-1))
#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT-PAGE_SHIFT)
#define HVM_HUGEPAGE_SIZE 0x5
#endif
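/*
 * Worked out (illustrative only): HPAGE_SIZE = 1UL << 22 = 4MB, and with
 * 4KB base pages HUGETLB_PAGE_ORDER = 22 - 12 = 10, i.e. 1024 base pages
 * per huge page.
 */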
#define PAGE_SIZE (1UL << PAGE_SHIFT)
#define PAGE_MASK (~((1 << PAGE_SHIFT) - 1))
#ifdef __KERNEL__
#ifndef __ASSEMBLY__
/*
* This is for PFN_DOWN, which mm.h needs. Seems the right place to pull it in.
*/
#include <linux/pfn.h>
/*
* We implement a two-level architecture-specific page table structure.
* Null intermediate page table level (pmd, pud) definitions will come from
* asm-generic/pagetable-nopmd.h and asm-generic/pagetable-nopud.h
*/
typedef struct { unsigned long pte; } pte_t;
typedef struct { unsigned long pgd; } pgd_t;
typedef struct { unsigned long pgprot; } pgprot_t;
typedef struct page *pgtable_t;
#define pte_val(x) ((x).pte)
#define pgd_val(x) ((x).pgd)
#define pgprot_val(x) ((x).pgprot)
#define __pte(x) ((pte_t) { (x) })
#define __pgd(x) ((pgd_t) { (x) })
#define __pgprot(x) ((pgprot_t) { (x) })
/*
* We need a __pa and a __va routine for kernel space.
* MIPS says they're only used during mem_init.
* also, check if we need a PHYS_OFFSET.
*/
#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET)
#define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET))
/* The "page frame" descriptor is defined in linux/mm.h */
struct page;
/* Returns page frame descriptor for virtual address. */
#define virt_to_page(kaddr) pfn_to_page(PFN_DOWN(__pa(kaddr)))
/* Default vm area behavior is non-executable. */
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
#define pfn_valid(pfn) ((pfn) < max_mapnr)
#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
/* Need to not use a define for linesize; may move this to another file. */
static inline void clear_page(void *page)
{
/* This can only be done on pages with L1 WB cache */
asm volatile(
" loop0(1f,%1);\n"
"1: { dczeroa(%0);\n"
" %0 = add(%0,#32); }:endloop0\n"
: "+r" (page)
: "r" (PAGE_SIZE/32)
: "lc0", "sa0", "memory"
);
}
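/*
 * Worked out (illustrative only): with 4KB pages the loop above runs
 * PAGE_SIZE/32 = 128 iterations, each dczeroa zeroing one 32-byte cache
 * line of the page.
 */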
#define copy_page(to, from) memcpy((to), (from), PAGE_SIZE)
/*
* Under assumption that kernel always "sees" user map...
*/
#define clear_user_page(page, vaddr, pg) clear_page(page)
#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
/*
* page_to_phys - convert page to physical address
* @page - pointer to page entry in mem_map
*/
#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
/*
* For port to Hexagon Virtual Machine, MAYBE we check for attempts
* to reference reserved HVM space, but in any case, the VM will be
* protected.
*/
#define kern_addr_valid(addr) (1)
#include <asm-generic/memory_model.h>
/* XXX Todo: implement assembly-optimized version of getorder. */
#include <asm-generic/getorder.h>
#endif /* ifndef __ASSEMBLY__ */
#endif /* ifdef __KERNEL__ */
#endif
/*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#ifndef _ASM_PARAM_H
#define _ASM_PARAM_H
#define EXEC_PAGESIZE 16384
#include <asm-generic/param.h>
#endif
/*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#ifndef _ASM_PERF_EVENT_H
#define _ASM_PERF_EVENT_H
#define PERF_EVENT_INDEX_OFFSET 0
#endif /* _ASM_PERF_EVENT_H */
/*
* Page table support for the Hexagon architecture
*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#ifndef _ASM_PGALLOC_H
#define _ASM_PGALLOC_H
#include <asm/mem-layout.h>
#include <asm/atomic.h>
#define check_pgt_cache() do {} while (0)
extern unsigned long long kmap_generation;
/*
* Page table creation interface
*/
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
pgd_t *pgd;
pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
/*
* There may be better ways to do this, but to ensure
* that new address spaces always contain the kernel
* base mapping, and to ensure that the user area is
* initially marked invalid, initialize the new map
* with a copy of the kernel's persistent map.
*/
memcpy(pgd, swapper_pg_dir, PTRS_PER_PGD*sizeof(pgd_t *));
mm->context.generation = kmap_generation;
/* Physical version is what is passed to virtual machine on switch */
mm->context.ptbase = __pa(pgd);
return pgd;
}
static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
free_page((unsigned long) pgd);
}
static inline struct page *pte_alloc_one(struct mm_struct *mm,
unsigned long address)
{
struct page *pte;
pte = alloc_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
if (pte)
pgtable_page_ctor(pte);
return pte;
}
/* _kernel variant gets to use a different allocator */
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
unsigned long address)
{
gfp_t flags = GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO;
return (pte_t *) __get_free_page(flags);
}
static inline void pte_free(struct mm_struct *mm, struct page *pte)
{
pgtable_page_dtor(pte);
__free_page(pte);
}
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
free_page((unsigned long)pte);
}
static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
pgtable_t pte)
{
/*
* Conveniently, zero in 3 LSB means indirect 4K page table.
* Not so convenient when you're trying to vary the page size.
*/
set_pmd(pmd, __pmd(((unsigned long)page_to_pfn(pte) << PAGE_SHIFT) |
HEXAGON_L1_PTE_SIZE));
}
/*
* Other architectures seem to have ways of making all processes
* share the same pmd's for their kernel mappings, but the v0.3
* Hexagon VM spec has a "monolithic" L1 table for user and kernel
* segments. We track "generations" of the kernel map to minimize
* overhead, and update the "slave" copies of the kernel mappings
* as part of switch_mm. However, we still need to update the
* kernel map of the active thread that is calling pmd_populate_kernel...
*/
static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
pte_t *pte)
{
extern spinlock_t kmap_gen_lock;
pmd_t *ppmd;
int pmdindex;
spin_lock(&kmap_gen_lock);
kmap_generation++;
mm->context.generation = kmap_generation;
current->active_mm->context.generation = kmap_generation;
spin_unlock(&kmap_gen_lock);
set_pmd(pmd, __pmd(((unsigned long)__pa(pte)) | HEXAGON_L1_PTE_SIZE));
/*
* Now the "slave" copy of the current thread.
* This is pointer arithmetic, not byte addresses!
*/
pmdindex = (pgd_t *)pmd - mm->pgd;
ppmd = (pmd_t *)current->active_mm->pgd + pmdindex;
set_pmd(ppmd, __pmd(((unsigned long)__pa(pte)) | HEXAGON_L1_PTE_SIZE));
if (pmdindex > max_kernel_seg)
max_kernel_seg = pmdindex;
}
#define __pte_free_tlb(tlb, pte, addr) \
do { \
pgtable_page_dtor((pte)); \
tlb_remove_page((tlb), (pte)); \
} while (0)
#endif
/*
* Page table support for the Hexagon architecture
*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#ifndef _ASM_PGTABLE_H
#define _ASM_PGTABLE_H
/*
* Page table definitions for Qualcomm Hexagon processor.
*/
#include <linux/swap.h>
#include <asm/page.h>
#include <asm-generic/pgtable-nopmd.h>
/* A handy thing to have if one has the RAM. Declared in head.S */
extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;
/*
* The PTE model described here is that of the Hexagon Virtual Machine,
* which autonomously walks 2-level page tables. At a lower level, we
* also describe the RISCish software-loaded TLB entry structure of
* the underlying Hexagon processor. A kernel built to run on the
* virtual machine has no need to know about the underlying hardware.
*/
#include <asm/vm_mmu.h>
/*
* To maximize the comfort level for the PTE manipulation macros,
* define the "well known" architecture-specific bits.
*/
#define _PAGE_READ __HVM_PTE_R
#define _PAGE_WRITE __HVM_PTE_W
#define _PAGE_EXECUTE __HVM_PTE_X
#define _PAGE_USER __HVM_PTE_U
/*
* We have a total of 4 "soft" bits available in the abstract PTE.
* The two mandatory software bits are Dirty and Accessed.
* To make nonlinear swap work according to the more recent
* model, we want a low order "Present" bit to indicate whether
* the PTE describes MMU programming or swap space.
*/
#define _PAGE_PRESENT (1<<0)
#define _PAGE_DIRTY (1<<1)
#define _PAGE_ACCESSED (1<<2)
/*
* _PAGE_FILE is only meaningful if _PAGE_PRESENT is false, while
* _PAGE_DIRTY is only meaningful if _PAGE_PRESENT is true.
* So we can overload the bit...
*/
#define _PAGE_FILE _PAGE_DIRTY /* set: pagecache, unset = swap */
/*
* For now, let's say that Valid and Present are the same thing.
* Alternatively, we could say that it's the "or" of R, W, and X
* permissions.
*/
#define _PAGE_VALID _PAGE_PRESENT
/*
* We're not defining _PAGE_GLOBAL here, since there's no concept
* of global pages or ASIDs exposed to the Hexagon Virtual Machine,
* and we want to use the same page table structures and macros in
* the native kernel as we do in the virtual machine kernel.
* So we'll put up with a bit of inefficiency for now...
*/
/*
* Top "FOURTH" level (pgd), which for the Hexagon VM is really
* only the second from the bottom, pgd and pud both being collapsed.
* Each entry represents 4MB of virtual address space, so a 4KB table
* maps the full 4GB.
*/
#define PGDIR_SHIFT 22
#define PTRS_PER_PGD 1024
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))
#ifdef CONFIG_PAGE_SIZE_4KB
#define PTRS_PER_PTE 1024
#endif
#ifdef CONFIG_PAGE_SIZE_16KB
#define PTRS_PER_PTE 256
#endif
#ifdef CONFIG_PAGE_SIZE_64KB
#define PTRS_PER_PTE 64
#endif
#ifdef CONFIG_PAGE_SIZE_256KB
#define PTRS_PER_PTE 16
#endif
#ifdef CONFIG_PAGE_SIZE_1MB
#define PTRS_PER_PTE 4
#endif
/* Any bigger and the PTE disappears. */
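/*
 * Worked out (illustrative only): each PGD entry spans 1 << PGDIR_SHIFT
 * = 4MB, so PTRS_PER_PTE is always 4MB / PAGE_SIZE: 4KB pages -> 1024
 * PTEs, 64KB pages -> 64 PTEs, 1MB pages -> 4 PTEs.
 */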
#define pgd_ERROR(e) \
printk(KERN_ERR "%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__,\
pgd_val(e))
/*
* Page Protection Constants. Includes (in this variant) cache attributes.
*/
extern unsigned long _dflt_cache_att;
#define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_USER | \
_dflt_cache_att)
#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | \
_PAGE_READ | _PAGE_EXECUTE | _dflt_cache_att)
#define PAGE_COPY PAGE_READONLY
#define PAGE_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
_PAGE_READ | _PAGE_EXECUTE | _dflt_cache_att)
#define PAGE_COPY_EXEC PAGE_EXEC
#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | \
_PAGE_EXECUTE | _PAGE_WRITE | _dflt_cache_att)
#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_READ | \
_PAGE_WRITE | _PAGE_EXECUTE | _dflt_cache_att)
/*
* Aliases for mapping mmap() protection bits to page protections.
* These get used for static initialization, so using the _dflt_cache_att
* variable for the default cache attribute isn't workable. If the
* default gets changed at boot time, the boot option code has to
* update data structures like the protection_map[] array.
*/
#define CACHEDEF (CACHE_DEFAULT << 6)
/* Private (copy-on-write) page protections. */
#define __P000 __pgprot(_PAGE_PRESENT | _PAGE_USER | CACHEDEF)
#define __P001 __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | CACHEDEF)
#define __P010 __P000 /* Write-only copy-on-write */
#define __P011 __P001 /* Read/Write copy-on-write */
#define __P100 __pgprot(_PAGE_PRESENT | _PAGE_USER | \
_PAGE_EXECUTE | CACHEDEF)
#define __P101 __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_EXECUTE | \
_PAGE_READ | CACHEDEF)
#define __P110 __P100 /* Write/execute copy-on-write */
#define __P111 __P101 /* Read/Write/Execute, copy-on-write */
/* Shared page protections. */
#define __S000 __P000
#define __S001 __P001
#define __S010 __pgprot(_PAGE_PRESENT | _PAGE_USER | \
_PAGE_WRITE | CACHEDEF)
#define __S011 __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | \
_PAGE_WRITE | CACHEDEF)
#define __S100 __pgprot(_PAGE_PRESENT | _PAGE_USER | \
_PAGE_EXECUTE | CACHEDEF)
#define __S101 __P101
#define __S110 __pgprot(_PAGE_PRESENT | _PAGE_USER | \
_PAGE_EXECUTE | _PAGE_WRITE | CACHEDEF)
#define __S111 __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | \
_PAGE_EXECUTE | _PAGE_WRITE | CACHEDEF)
extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; /* located in head.S */
/* Seems to be zero even in architectures where the zero page is firewalled? */
#define FIRST_USER_ADDRESS 0
#define pte_special(pte) 0
#define pte_mkspecial(pte) (pte)
/* HUGETLB not working currently */
#ifdef CONFIG_HUGETLB_PAGE
#define pte_mkhuge(pte) __pte((pte_val(pte) & ~0x3) | HVM_HUGEPAGE_SIZE)
#endif
/*
* For now, assume that higher-level code will do TLB/MMU invalidations
* and don't insert that overhead into this low-level function.
*/
extern void sync_icache_dcache(pte_t pte);
#define pte_present_exec_user(pte) \
((pte_val(pte) & (_PAGE_EXECUTE | _PAGE_USER)) == \
(_PAGE_EXECUTE | _PAGE_USER))
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
/* should really be using pte_exec, if it weren't declared later. */
if (pte_present_exec_user(pteval))
sync_icache_dcache(pteval);
*ptep = pteval;
}
/*
* For the Hexagon Virtual Machine MMU (or its emulation), a null/invalid
* L1 PTE (PMD/PGD) has 7 in the least significant bits. For the L2 PTE
* (Linux PTE), the key is to have bits 11..9 all zero. We'd use 0x7
* as a universal null entry, but some of those least significant bits
* are interpreted by software.
*/
#define _NULL_PMD 0x7
#define _NULL_PTE 0x0
static inline void pmd_clear(pmd_t *pmd_entry_ptr)
{
pmd_val(*pmd_entry_ptr) = _NULL_PMD;
}
/*
* Conveniently, a null PTE value is invalid.
*/
static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
pte_t *ptep)
{
pte_val(*ptep) = _NULL_PTE;
}
#ifdef NEED_PMD_INDEX_DESPITE_BEING_2_LEVEL
/**
* pmd_index - returns the index of the entry in the PMD page
* which would control the given virtual address
*/
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
#endif
/**
* pgd_index - returns the index of the entry in the PGD page
* which would control the given virtual address
*
* This returns the *index* for the address in the pgd_t
*/
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
/*
* pgd_offset - find an offset in a page-table-directory
*/
#define pgd_offset(mm, addr) ((mm)->pgd + pgd_index(addr))
/*
* pgd_offset_k - get kernel (init_mm) pgd entry pointer for addr
*/
#define pgd_offset_k(address) pgd_offset(&init_mm, address)
/**
* pmd_none - check if pmd_entry is mapped
* @pmd_entry: pmd entry
*
* MIPS checks it against that "invalid pte table" thing.
*/
static inline int pmd_none(pmd_t pmd)
{
return pmd_val(pmd) == _NULL_PMD;
}
/**
* pmd_present - is there a page table behind this?
* Essentially the inverse of pmd_none. We maybe
* save an inline instruction by defining it this
* way, instead of simply "!pmd_none".
*/
static inline int pmd_present(pmd_t pmd)
{
return pmd_val(pmd) != (unsigned long)_NULL_PMD;
}
/**
* pmd_bad - check if a PMD entry is "bad". That might mean swapped out.
* As we have no known cause of badness, it's null, as it is for many
* architectures.
*/
static inline int pmd_bad(pmd_t pmd)
{
return 0;
}
/*
* pmd_page - converts a PMD entry to a page pointer
*/
#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
#define pmd_pgtable(pmd) pmd_page(pmd)
/**
* pte_none - check if pte is mapped
* @pte: pte_t entry
*/
static inline int pte_none(pte_t pte)
{
return pte_val(pte) == _NULL_PTE;
}
/*
* pte_present - check if page is present
*/
static inline int pte_present(pte_t pte)
{
return pte_val(pte) & _PAGE_PRESENT;
}
/* mk_pte - make a PTE out of a page pointer and protection bits */
#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
/* pte_page - returns a page (frame pointer/descriptor?) based on a PTE */
#define pte_page(x) pfn_to_page(pte_pfn(x))
/* pte_mkold - mark PTE as not recently accessed */
static inline pte_t pte_mkold(pte_t pte)
{
pte_val(pte) &= ~_PAGE_ACCESSED;
return pte;
}
/* pte_mkyoung - mark PTE as recently accessed */
static inline pte_t pte_mkyoung(pte_t pte)
{
pte_val(pte) |= _PAGE_ACCESSED;
return pte;
}
/* pte_mkclean - mark page as in sync with backing store */
static inline pte_t pte_mkclean(pte_t pte)
{
pte_val(pte) &= ~_PAGE_DIRTY;
return pte;
}
/* pte_mkdirty - mark page as modified */
static inline pte_t pte_mkdirty(pte_t pte)
{
pte_val(pte) |= _PAGE_DIRTY;
return pte;
}
/* pte_young - "is PTE marked as accessed"? */
static inline int pte_young(pte_t pte)
{
return pte_val(pte) & _PAGE_ACCESSED;
}
/* pte_dirty - "is PTE dirty?" */
static inline int pte_dirty(pte_t pte)
{
return pte_val(pte) & _PAGE_DIRTY;
}
/* pte_modify - set protection bits on PTE */
static inline pte_t pte_modify(pte_t pte, pgprot_t prot)
{
pte_val(pte) &= PAGE_MASK;
pte_val(pte) |= pgprot_val(prot);
return pte;
}
/* pte_wrprotect - mark page as not writable */
static inline pte_t pte_wrprotect(pte_t pte)
{
pte_val(pte) &= ~_PAGE_WRITE;
return pte;
}
/* pte_mkwrite - mark page as writable */
static inline pte_t pte_mkwrite(pte_t pte)
{
pte_val(pte) |= _PAGE_WRITE;
return pte;
}
/* pte_mkexec - mark PTE as executable */
static inline pte_t pte_mkexec(pte_t pte)
{
pte_val(pte) |= _PAGE_EXECUTE;
return pte;
}
/* pte_read - "is PTE marked as readable?" */
static inline int pte_read(pte_t pte)
{
return pte_val(pte) & _PAGE_READ;
}
/* pte_write - "is PTE marked as writable?" */
static inline int pte_write(pte_t pte)
{
return pte_val(pte) & _PAGE_WRITE;
}
/* pte_exec - "is PTE marked as executable?" */
static inline int pte_exec(pte_t pte)
{
return pte_val(pte) & _PAGE_EXECUTE;
}
/* __pte_to_swp_entry - extract swap entry from PTE */
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
/* __swp_entry_to_pte - extract PTE from swap entry */
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
/* pfn_pte - convert page number and protection value to page table entry */
#define pfn_pte(pfn, pgprot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(pgprot))
/* pte_pfn - convert pte to page frame number */
#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)
#define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))
/*
* set_pte_at - update page table and do whatever magic may be
* necessary to make the underlying hardware/firmware take note.
*
* VM may require a virtual instruction to alert the MMU.
*/
#define set_pte_at(mm, addr, ptep, pte) set_pte(ptep, pte)
/*
* May need to invoke the virtual machine as well...
*/
#define pte_unmap(pte) do { } while (0)
#define pte_unmap_nested(pte) do { } while (0)
/*
* pte_offset_map - returns the linear address of the page table entry
* corresponding to an address
*/
#define pte_offset_map(dir, address) \
((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
#define pte_offset_map_nested(pmd, addr) pte_offset_map(pmd, addr)
/* pte_offset_kernel - kernel version of pte_offset */
#define pte_offset_kernel(dir, address) \
((pte_t *) (unsigned long) __va(pmd_val(*dir) & PAGE_MASK) \
+ __pte_offset(address))
/* ZERO_PAGE - returns the globally shared zero page */
#define ZERO_PAGE(vaddr) (virt_to_page(&empty_zero_page))
#define __pte_offset(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
/* Nothing special about IO remapping at this point */
#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
remap_pfn_range(vma, vaddr, pfn, size, prot)
/* I think this is in case we have page table caches; needed by init/main.c */
#define pgtable_cache_init() do { } while (0)
/*
* Swap/file PTE definitions. If _PAGE_PRESENT is zero, the rest of the
* PTE is interpreted as swap information. Depending on the _PAGE_FILE
* bit, the remaining free bits are either interpreted as a file offset
* or a swap type/offset tuple. Rather than have the TLB fill handler
* test _PAGE_PRESENT, we're going to reserve the permissions bits
* and set them to all zeros for swap entries, which speeds up the
* miss handler at the cost of 3 bits of offset. That trade-off can
* be revisited if necessary, but Hexagon processor architecture and
* target applications suggest a lot of TLB misses and not much swap space.
*
* Format of swap PTE:
* bit 0: Present (zero)
* bit 1: _PAGE_FILE (zero)
* bits 2-6: swap type (arch independent layer uses 5 bits max)
* bits 7-9: bits 2:0 of offset
* bits 10-12: effectively _PAGE_PROTNONE (all zero)
* bits 13-31: bits 21:3 of swap offset
*
* Format of file PTE:
* bit 0: Present (zero)
* bit 1: _PAGE_FILE (one)
* bits 2-9: bits 7:0 of offset
* bits 10-12: effectively _PAGE_PROTNONE (all zero)
* bits 13-31: bits 26:8 of file offset
*
* The split offset makes some of the following macros a little gnarly,
* but there's plenty of precedent for this sort of thing.
*/
#define PTE_FILE_MAX_BITS 27
/* Used for swap PTEs */
#define __swp_type(swp_pte) (((swp_pte).val >> 2) & 0x1f)
#define __swp_offset(swp_pte) \
((((swp_pte).val >> 7) & 0x7) | (((swp_pte).val >> 10) & 0x003ffff8))
#define __swp_entry(type, offset) \
((swp_entry_t) { \
((type << 2) | \
((offset & 0x3ffff8) << 10) | ((offset & 0x7) << 7)) })
/* Used for file PTEs */
#define pte_file(pte) \
((pte_val(pte) & (_PAGE_FILE | _PAGE_PRESENT)) == _PAGE_FILE)
#define pte_to_pgoff(pte) \
(((pte_val(pte) >> 2) & 0xff) | ((pte_val(pte) >> 5) & 0x07ffff00))
#define pgoff_to_pte(off) \
((pte_t) { ((((off) & 0x7ffff00) << 5) | (((off) & 0xff) << 2)\
| _PAGE_FILE) })
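/*
 * Illustrative round trip only (hypothetical values): __swp_entry(5,
 * 0x12345) packs to 0x048d0294, from which __swp_type() and
 * __swp_offset() recover 5 and 0x12345.
 */
#if 0
static void example_swp_round_trip(void)
{
	swp_entry_t e = __swp_entry(5, 0x12345);

	BUG_ON(__swp_type(e) != 5);
	BUG_ON(__swp_offset(e) != 0x12345);
}
#endif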
/* Oh boy. There are a lot of possible arch overrides found in this file. */
#include <asm-generic/pgtable.h>
#endif
/*
* Process/processor support for the Hexagon architecture
*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#ifndef _ASM_PROCESSOR_H
#define _ASM_PROCESSOR_H
#ifndef __ASSEMBLY__
#include <asm/mem-layout.h>
#include <asm/registers.h>
#include <asm/hexagon_vm.h>
/* must be a macro */
#define current_text_addr() ({ __label__ _l; _l: &&_l; })
/* task_struct, defined elsewhere, is the "process descriptor" */
struct task_struct;
/* this is defined in arch/process.c */
extern pid_t kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
extern unsigned long thread_saved_pc(struct task_struct *tsk);
extern void start_thread(struct pt_regs *, unsigned long, unsigned long);
/*
* thread_struct is supposed to be for context switch data.
* Specifically, to hold the state necessary to perform switch_to...
*/
struct thread_struct {
void *switch_sp;
};
/*
* initializes thread_struct
* The only thing we have in there is switch_sp
* which doesn't really need to be initialized.
*/
#define INIT_THREAD { \
}
#define cpu_relax() __vmyield()
/*
* "Unlazying all lazy status" occurs here.
*/
static inline void prepare_to_copy(struct task_struct *tsk)
{
}
/*
* Decides where the kernel will search for a free chunk of vm space during
* mmaps.
* See also arch_get_unmapped_area.
* Doesn't apply to mappings requested with MAP_FIXED, though...
*
* Apparently the convention is that ld.so will ask for "unmapped" private
* memory to be allocated SOMEWHERE, but it also asks for memory explicitly
* via MAP_FIXED at the lower addresses starting at VA=0x0.
*
* If the two requests collide, you get authentic segfaulting action, so
* you have to kick the "unmapped" base requests higher up.
*/
#define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE/3))
#define task_pt_regs(task) \
((struct pt_regs *)(task_stack_page(task) + THREAD_SIZE) - 1)
#define KSTK_EIP(tsk) (pt_elr(task_pt_regs(tsk)))
#define KSTK_ESP(tsk) (pt_psp(task_pt_regs(tsk)))
/* Free all resources held by a thread; defined in process.c */
extern void release_thread(struct task_struct *dead_task);
/* Get wait channel for task P. */
extern unsigned long get_wchan(struct task_struct *p);
/* The following stuff is pretty HEXAGON specific. */
/* This is really just here for __switch_to.
Offsets are pulled via asm-offsets.c */
/*
* No real reason why VM and native switch stacks should be different.
* Ultimately this should merge. Note that Rev C. ABI called out only
* R24-27 as callee saved GPRs needing explicit attention (R29-31 being
* dealt with automagically by allocframe), but the current ABI has
* more, R16-R27. By saving more, the worst case is that we waste some
* cycles if building with the old compilers.
*/
struct hexagon_switch_stack {
unsigned long long r1716;
unsigned long long r1918;
unsigned long long r2120;
unsigned long long r2322;
unsigned long long r2524;
unsigned long long r2726;
unsigned long fp;
unsigned long lr;
};
#endif /* !__ASSEMBLY__ */
#endif
/*
* Ptrace definitions for the Hexagon architecture
*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#ifndef _ASM_PTRACE_H
#define _ASM_PTRACE_H
#include <asm/registers.h>
#define instruction_pointer(regs) pt_elr(regs)
#define user_stack_pointer(regs) ((regs)->r29)
#define profile_pc(regs) instruction_pointer(regs)
/* kprobe-based event tracer support */
extern int regs_query_register_offset(const char *name);
extern const char *regs_query_register_name(unsigned int offset);
#endif
/*
* Register definitions for the Hexagon architecture
*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#ifndef _ASM_REGISTERS_H
#define _ASM_REGISTERS_H
#define SP r29
#ifndef __ASSEMBLY__
/* See kernel/entry.S for further documentation. */
/*
* Entry code copies the event record out of guest registers into
* this structure (which is on the stack).
*/
struct hvm_event_record {
unsigned long vmel; /* Event Linkage (return address) */
unsigned long vmest; /* Event context - pre-event SSR values */
unsigned long vmpsp; /* Previous stack pointer */
unsigned long vmbadva; /* Bad virtual address for addressing events */
};
struct pt_regs {
long restart_r0; /* R0 checkpoint for syscall restart */
long syscall_nr; /* Only used in system calls */
union {
struct {
unsigned long usr;
unsigned long preds;
};
long long int predsusr;
};
union {
struct {
unsigned long m0;
unsigned long m1;
};
long long int m1m0;
};
union {
struct {
unsigned long sa1;
unsigned long lc1;
};
long long int lc1sa1;
};
union {
struct {
unsigned long sa0;
unsigned long lc0;
};
long long int lc0sa0;
};
union {
struct {
unsigned long gp;
unsigned long ugp;
};
long long int ugpgp;
};
/*
* Be extremely careful with rearranging these, if at all. Some code
* assumes the 32 registers exist exactly like this in memory;
* e.g. kernel/ptrace.c
* e.g. kernel/signal.c (restore_sigcontext)
*/
union {
struct {
unsigned long r00;
unsigned long r01;
};
long long int r0100;
};
union {
struct {
unsigned long r02;
unsigned long r03;
};
long long int r0302;
};
union {
struct {
unsigned long r04;
unsigned long r05;
};
long long int r0504;
};
union {
struct {
unsigned long r06;
unsigned long r07;
};
long long int r0706;
};
union {
struct {
unsigned long r08;
unsigned long r09;
};
long long int r0908;
};
union {
struct {
unsigned long r10;
unsigned long r11;
};
long long int r1110;
};
union {
struct {
unsigned long r12;
unsigned long r13;
};
long long int r1312;
};
union {
struct {
unsigned long r14;
unsigned long r15;
};
long long int r1514;
};
union {
struct {
unsigned long r16;
unsigned long r17;
};
long long int r1716;
};
union {
struct {
unsigned long r18;
unsigned long r19;
};
long long int r1918;
};
union {
struct {
unsigned long r20;
unsigned long r21;
};
long long int r2120;
};
union {
struct {
unsigned long r22;
unsigned long r23;
};
long long int r2322;
};
union {
struct {
unsigned long r24;
unsigned long r25;
};
long long int r2524;
};
union {
struct {
unsigned long r26;
unsigned long r27;
};
long long int r2726;
};
union {
struct {
unsigned long r28;
unsigned long r29;
};
long long int r2928;
};
union {
struct {
unsigned long r30;
unsigned long r31;
};
long long int r3130;
};
/* VM dispatch pushes event record onto stack - we can build on it */
struct hvm_event_record hvmer;
};
/* Defines to conveniently access the values */
/*
* As of the VM spec 0.5, these registers are now set/retrieved via a
* VM call. On the in-bound side, we just fetch the values
* at the entry points and stuff them into the old record in pt_regs.
* However, on the outbound side, probably at VM rte, we set the
* registers back.
*/
#define pt_elr(regs) ((regs)->hvmer.vmel)
#define pt_set_elr(regs, val) ((regs)->hvmer.vmel = (val))
#define pt_cause(regs) ((regs)->hvmer.vmest & (HVM_VMEST_CAUSE_MSK))
#define user_mode(regs) \
(((regs)->hvmer.vmest & (HVM_VMEST_UM_MSK << HVM_VMEST_UM_SFT)) != 0)
#define ints_enabled(regs) \
(((regs)->hvmer.vmest & (HVM_VMEST_IE_MSK << HVM_VMEST_IE_SFT)) != 0)
#define pt_psp(regs) ((regs)->hvmer.vmpsp)
#define pt_badva(regs) ((regs)->hvmer.vmbadva)
#define pt_set_rte_sp(regs, sp) do {\
pt_psp(regs) = (sp);\
(regs)->SP = (unsigned long) &((regs)->hvmer);\
} while (0)
#define pt_set_kmode(regs) \
(regs)->hvmer.vmest = (HVM_VMEST_IE_MSK << HVM_VMEST_IE_SFT)
#define pt_set_usermode(regs) \
(regs)->hvmer.vmest = (HVM_VMEST_UM_MSK << HVM_VMEST_UM_SFT) \
| (HVM_VMEST_IE_MSK << HVM_VMEST_IE_SFT)
#endif /* ifndef __ASSEMBLY__ */
#endif
/*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#ifndef _ASM_SETUP_H
#define _ASM_SETUP_H
#include <linux/init.h>
#include <asm-generic/setup.h>
extern char external_cmdline_buffer;
void __init setup_arch_memory(void);
#endif
/*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#ifndef _ASM_SIGCONTEXT_H
#define _ASM_SIGCONTEXT_H
#include <asm/user.h>
/*
* Signal context structure - contains all info to do with the state
* before the signal handler was invoked. Note: only add new entries
* to the end of the structure.
*/
struct sigcontext {
struct user_regs_struct sc_regs;
} __aligned(8);
#endif
/*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#ifndef _ASM_SIGNAL_H
#define _ASM_SIGNAL_H
extern unsigned long __rt_sigtramp_template[2];
#include <asm-generic/signal.h>
#endif
/*
* SMP definitions for the Hexagon architecture
*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#ifndef __ASM_SMP_H
#define __ASM_SMP_H
#include <linux/cpumask.h>
#define raw_smp_processor_id() (current_thread_info()->cpu)
enum ipi_message_type {
IPI_NOP = 0,
IPI_RESCHEDULE = 1,
IPI_CALL_FUNC,
IPI_CALL_FUNC_SINGLE,
IPI_CPU_STOP,
IPI_TIMER,
};
extern void send_ipi(const struct cpumask *cpumask, enum ipi_message_type msg);
extern void smp_start_cpus(void);
extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
extern void smp_vm_unmask_irq(void *info);
#endif
/*
* Spinlock support for the Hexagon architecture
*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#ifndef _ASM_SPINLOCK_H
#define _ASM_SPINLOCK_H
#include <asm/irqflags.h>
/*
* This file is pulled in for SMP builds.
* Really need to check all the barrier stuff for "true" SMP
*/
/*
 * Read locks:
 * - load the lock value
 * - increment it
 * - if the loaded value was negative (write-locked), go back and try again.
 * - if the conditional store fails, go back and try again.
 * - a successful store of the incremented value -> lock acquired
 */
static inline void arch_read_lock(arch_rwlock_t *lock)
{
__asm__ __volatile__(
"1: R6 = memw_locked(%0);\n"
" { P3 = cmp.ge(R6,#0); R6 = add(R6,#1);}\n"
" { if !P3 jump 1b; }\n"
" memw_locked(%0,P3) = R6;\n"
" { if !P3 jump 1b; }\n"
:
: "r" (&lock->lock)
: "memory", "r6", "p3"
);
}
static inline void arch_read_unlock(arch_rwlock_t *lock)
{
__asm__ __volatile__(
"1: R6 = memw_locked(%0);\n"
" R6 = add(R6,#-1);\n"
" memw_locked(%0,P3) = R6\n"
" if !P3 jump 1b;\n"
:
: "r" (&lock->lock)
: "memory", "r6", "p3"
);
}
/* Returns 0 on failure, 1 on success. */
static inline int arch_read_trylock(arch_rwlock_t *lock)
{
int temp;
__asm__ __volatile__(
" R6 = memw_locked(%1);\n"
" { %0 = #0; P3 = cmp.ge(R6,#0); R6 = add(R6,#1);}\n"
" { if !P3 jump 1f; }\n"
" memw_locked(%1,P3) = R6;\n"
" { %0 = P3 }\n"
"1:\n"
: "=&r" (temp)
: "r" (&lock->lock)
: "memory", "r6", "p3"
);
return temp;
}
static inline int arch_read_can_lock(arch_rwlock_t *rwlock)
{
return rwlock->lock == 0;
}
static inline int arch_write_can_lock(arch_rwlock_t *rwlock)
{
return rwlock->lock == 0;
}
/* Write lock: stores -1 in the lock value to mark it held. */
static inline void arch_write_lock(arch_rwlock_t *lock)
{
__asm__ __volatile__(
"1: R6 = memw_locked(%0)\n"
" { P3 = cmp.eq(R6,#0); R6 = #-1;}\n"
" { if !P3 jump 1b; }\n"
" memw_locked(%0,P3) = R6;\n"
" { if !P3 jump 1b; }\n"
:
: "r" (&lock->lock)
: "memory", "r6", "p3"
);
}
static inline int arch_write_trylock(arch_rwlock_t *lock)
{
int temp;
__asm__ __volatile__(
" R6 = memw_locked(%1)\n"
" { %0 = #0; P3 = cmp.eq(R6,#0); R6 = #-1;}\n"
" { if !P3 jump 1f; }\n"
" memw_locked(%1,P3) = R6;\n"
" %0 = P3;\n"
"1:\n"
: "=&r" (temp)
: "r" (&lock->lock)
: "memory", "r6", "p3"
);
return temp;
}
static inline void arch_write_unlock(arch_rwlock_t *lock)
{
smp_mb();
lock->lock = 0;
}
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
__asm__ __volatile__(
"1: R6 = memw_locked(%0);\n"
" P3 = cmp.eq(R6,#0);\n"
" { if !P3 jump 1b; R6 = #1; }\n"
" memw_locked(%0,P3) = R6;\n"
" { if !P3 jump 1b; }\n"
:
: "r" (&lock->lock)
: "memory", "r6", "p3"
);
}
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
smp_mb();
lock->lock = 0;
}
static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
{
int temp;
__asm__ __volatile__(
" R6 = memw_locked(%1);\n"
" P3 = cmp.eq(R6,#0);\n"
" { if !P3 jump 1f; R6 = #1; %0 = #0; }\n"
" memw_locked(%1,P3) = R6;\n"
" %0 = P3;\n"
"1:\n"
: "=&r" (temp)
: "r" (&lock->lock)
: "memory", "r6", "p3"
);
return temp;
}
/*
 * SMP spinlocks allow only a single CPU at a time to hold the lock
 */
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
#define arch_spin_unlock_wait(lock) \
do {while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
#define arch_spin_is_locked(x) ((x)->lock != 0)
#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
#endif
/*
* Spinlock support for the Hexagon architecture
*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#ifndef _ASM_SPINLOCK_TYPES_H
#define _ASM_SPINLOCK_TYPES_H
#include <linux/version.h>
#ifndef __LINUX_SPINLOCK_TYPES_H
# error "please don't include this file directly"
#endif
typedef struct {
volatile unsigned int lock;
} arch_spinlock_t;
#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
typedef struct {
volatile unsigned int lock;
} arch_rwlock_t;
#define __ARCH_RW_LOCK_UNLOCKED { 0 }
#endif
/*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#ifndef _ASM_STRING_H_
#define _ASM_STRING_H_
#ifdef __KERNEL__
#define __HAVE_ARCH_MEMCPY
extern void *memcpy(void *__to, __const__ void *__from, size_t __n);
/* ToDo: use dczeroa, accelerate the compiler-constant zero case */
#define __HAVE_ARCH_MEMSET
extern void *memset(void *__to, int c, size_t __n);
#endif
#endif /* _ASM_STRING_H_ */
/*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#ifndef _ASM_SUSPEND_H
#define _ASM_SUSPEND_H
static inline int arch_prepare_suspend(void)
{
return 0;
}
#endif
/*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#ifndef _ASM_SWAB_H
#define _ASM_SWAB_H
#define __SWAB_64_THRU_32__
#endif
/*
* Syscall support for the Hexagon architecture
*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#ifndef _ASM_HEXAGON_SYSCALL_H
#define _ASM_HEXAGON_SYSCALL_H
typedef long (*syscall_fn)(unsigned long, unsigned long,
unsigned long, unsigned long,
unsigned long, unsigned long);
asmlinkage int sys_execve(char __user *ufilename, char __user * __user *argv,
char __user * __user *envp);
asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp,
unsigned long parent_tidp, unsigned long child_tidp);
#define sys_execve sys_execve
#define sys_clone sys_clone
#include <asm-generic/syscalls.h>
extern void *sys_call_table[];
static inline long syscall_get_nr(struct task_struct *task,
struct pt_regs *regs)
{
return regs->r06;
}
static inline void syscall_get_arguments(struct task_struct *task,
struct pt_regs *regs,
unsigned int i, unsigned int n,
unsigned long *args)
{
BUG_ON(i + n > 6);
memcpy(args, &(&regs->r00)[i], n * sizeof(args[0]));
}
#endif
/*
* System level definitions for the Hexagon architecture
*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#ifndef _ASM_SYSTEM_H
#define _ASM_SYSTEM_H
#include <linux/linkage.h>
#include <linux/irqflags.h>
#include <asm/atomic.h>
#include <asm/hexagon_vm.h>
struct thread_struct;
extern struct task_struct *__switch_to(struct task_struct *,
struct task_struct *,
struct task_struct *);
#define switch_to(p, n, r) do {\
r = __switch_to((p), (n), (r));\
} while (0)
#define rmb() barrier()
#define read_barrier_depends() barrier()
#define wmb() barrier()
#define mb() barrier()
#define smp_rmb() barrier()
#define smp_read_barrier_depends() barrier()
#define smp_wmb() barrier()
#define smp_mb() barrier()
#define smp_mb__before_atomic_dec() barrier()
#define smp_mb__after_atomic_dec() barrier()
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()
/*
* __xchg - atomically exchange a register and a memory location
* @x: value to swap
* @ptr: pointer to memory
* @size: size of the value
*
* Only 4 bytes supported currently.
*
 * Note: there was an erratum on V2 concerning ".new" predicates used
 * with memw_locked.
 */
static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
int size)
{
unsigned long retval;
/* Can't seem to use printk or panic here, so just stop */
if (size != 4) do { asm volatile("brkpt;\n"); } while (1);
__asm__ __volatile__ (
"1: %0 = memw_locked(%1);\n" /* load into retval */
" memw_locked(%1,P0) = %2;\n" /* store into memory */
" if !P0 jump 1b;\n"
: "=&r" (retval)
: "r" (ptr), "r" (x)
: "memory", "p0"
);
return retval;
}
/*
* Atomically swap the contents of a register with memory. Should be atomic
* between multiple CPU's and within interrupts on the same CPU.
*/
#define xchg(ptr, v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v), (ptr), \
sizeof(*(ptr))))
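/*
 * Illustrative usage sketch (editor's example, not original source;
 * "owner" and "owner_was_free" are hypothetical names):
 *
 *	static unsigned long owner;
 *
 *	unsigned long prev = xchg(&owner, 1UL);
 *	if (prev == 0)
 *		owner_was_free();	(hypothetical callee)
 *
 * xchg() returns the previous contents of *ptr, so prev == 0 means
 * this CPU atomically transitioned owner from 0 to 1.
 */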
/* Set a value, then issue a full memory barrier. Used by the scheduler. */
#define set_mb(var, value) \
do { var = value; mb(); } while (0)
/*
 * See rt-mutex-design.txt: cmpxchg atomically checks whether *ptr == old
 * and, if so, swaps in new. On this architecture it currently looks just
 * like atomic_cmpxchg, plus the casts needed to preserve the pointed-to
 * type.
 */
#define __HAVE_ARCH_CMPXCHG 1
#define cmpxchg(ptr, old, new) \
({ \
__typeof__(ptr) __ptr = (ptr); \
__typeof__(*(ptr)) __old = (old); \
__typeof__(*(ptr)) __new = (new); \
__typeof__(*(ptr)) __oldval = 0; \
\
asm volatile( \
"1: %0 = memw_locked(%1);\n" \
" { P0 = cmp.eq(%0,%2);\n" \
" if (!P0.new) jump:nt 2f; }\n" \
" memw_locked(%1,p0) = %3;\n" \
" if (!P0) jump 1b;\n" \
"2:\n" \
: "=&r" (__oldval) \
: "r" (__ptr), "r" (__old), "r" (__new) \
: "memory", "p0" \
); \
__oldval; \
})
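/*
 * Typical lock-free usage pattern (editor's sketch, assuming a
 * hypothetical int-sized "counter" variable; illustrative only):
 *
 *	static int counter;
 *	int old, new;
 *
 *	do {
 *		old = counter;
 *		new = old + 1;
 *	} while (cmpxchg(&counter, old, new) != old);
 *
 * The loop retries whenever another thread updated counter between
 * the read and the conditional store.
 */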
/* Keep the stack pointer 8-byte aligned. */
#define STACK_MASK (~7)
#define arch_align_stack(x) ((x) & STACK_MASK)
#endif
/*
* Thread support for the Hexagon architecture
*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#ifndef _ASM_THREAD_INFO_H
#define _ASM_THREAD_INFO_H
#ifdef __KERNEL__
#ifndef __ASSEMBLY__
#include <asm/processor.h>
#include <asm/registers.h>
#include <asm/page.h>
#endif
#define THREAD_SHIFT 12
#define THREAD_SIZE (1<<THREAD_SHIFT)
#if THREAD_SHIFT >= PAGE_SHIFT
#define THREAD_SIZE_ORDER (THREAD_SHIFT - PAGE_SHIFT)
#else /* don't use standard allocator */
#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
extern struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node);
extern void free_thread_info(struct thread_info *ti);
#endif
#ifndef __ASSEMBLY__
typedef struct {
unsigned long seg;
} mm_segment_t;
/*
 * This is union'd with the "bottom" of the kernel stack.
 * It keeps track of per-thread state that routines need
 * to access quickly.
 */
struct thread_info {
struct task_struct *task; /* main task structure */
struct exec_domain *exec_domain; /* execution domain */
unsigned long flags; /* low level flags */
__u32 cpu; /* current cpu */
int preempt_count; /* 0=>preemptible,<0=>BUG */
	mm_segment_t		addr_limit;	/* user/kernel address limit */
	/*
	 * used by restartable system calls; holds the restart
	 * function pointer and its arguments
	 */
struct restart_block restart_block;
/* Points to the current pt_regs frame */
struct pt_regs *regs;
	/*
	 * saved kernel stack pointer at switch_to time;
	 * apparently unused in the VM model (see thread_struct)
	 */
unsigned long sp;
};
#else /* !__ASSEMBLY__ */
#include <asm/asm-offsets.h>
#endif /* __ASSEMBLY__ */
/* <linux/hardirq.h> relies on this definition. */
#define PREEMPT_ACTIVE 0x10000000
#ifndef __ASSEMBLY__
#define INIT_THREAD_INFO(tsk) \
{ \
.task = &tsk, \
.exec_domain = &default_exec_domain, \
.flags = 0, \
.cpu = 0, \
.preempt_count = 1, \
.addr_limit = KERNEL_DS, \
.restart_block = { \
.fn = do_no_restart_syscall, \
}, \
.sp = 0, \
.regs = NULL, \
}
#define init_thread_info (init_thread_union.thread_info)
#define init_stack (init_thread_union.stack)
/* Preprocessor stringification: expand THREADINFO_REG, then quote it */
#define qqstr(s) qstr(s)
#define qstr(s) #s
#define QUOTED_THREADINFO_REG qqstr(THREADINFO_REG)
register struct thread_info *__current_thread_info asm(QUOTED_THREADINFO_REG);
#define current_thread_info() __current_thread_info
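/*
 * Worked expansion (editor's note; the register assignment below is
 * hypothetical): if THREADINFO_REG were defined to r19, then
 *
 *	qstr(THREADINFO_REG)  ->  "THREADINFO_REG"
 *	qqstr(THREADINFO_REG) ->  qstr(r19)  ->  "r19"
 *
 * i.e. the extra level of indirection expands the macro argument
 * before stringizing it.
 */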
#endif /* __ASSEMBLY__ */
/*
* thread information flags
* - these are process state flags that various assembly files
* may need to access
* - pending work-to-be-done flags are in LSW
* - other flags in MSW
*/
#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
#define TIF_NOTIFY_RESUME 1 /* resumption notification requested */
#define TIF_SIGPENDING 2 /* signal pending */
#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
#define TIF_SINGLESTEP 4 /* restore ss @ return to usr mode */
#define TIF_IRET 5 /* return with iret */
#define TIF_RESTORE_SIGMASK 6 /* restore sig mask in do_signal() */
/* true if poll_idle() is polling TIF_NEED_RESCHED */
#define TIF_POLLING_NRFLAG 16
#define TIF_MEMDIE 17 /* OOM killer killed process */
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
#define _TIF_IRET (1 << TIF_IRET)
#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
/* work to do on interrupt/exception return - All but TIF_SYSCALL_TRACE */
#define _TIF_WORK_MASK (0x0000FFFF & ~_TIF_SYSCALL_TRACE)
/* work to do on any return to u-space */
#define _TIF_ALLWORK_MASK 0x0000FFFF
#endif /* __KERNEL__ */
#endif
/*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#ifndef ASM_TIME_H
#define ASM_TIME_H
extern cycles_t pcycle_freq_mhz;
extern cycles_t thread_freq_mhz;
extern cycles_t sleep_clk_freq;
void setup_percpu_clockdev(void);
void ipi_timer(void);
#endif
/*
* Timer support for Hexagon
*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#ifndef _ASM_TIMER_REGS_H
#define _ASM_TIMER_REGS_H
/* This stuff should go into a platform-specific file */
#define TCX0_CLK_RATE 19200
#define TIMER_ENABLE 0
#define TIMER_CLR_ON_MATCH 1
/*
* 8x50 HDD Specs 5-8. Simulator co-sim not fixed until
* release 1.1, and then it's "adjustable" and probably not defaulted.
*/
#define RTOS_TIMER_INT 3
#ifdef CONFIG_HEXAGON_COMET
#define RTOS_TIMER_REGS_ADDR 0xAB000000UL
#endif
#define SLEEP_CLK_RATE 32000
#endif
/*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#ifndef _ASM_TIMEX_H
#define _ASM_TIMEX_H
#include <asm-generic/timex.h>
#include <asm/timer-regs.h>
/* Using TCX0 as our clock. CLOCK_TICK_RATE scheduled to be removed. */
#define CLOCK_TICK_RATE TCX0_CLK_RATE
#define ARCH_HAS_READ_CURRENT_TIMER
static inline int read_current_timer(unsigned long *timer_val)
{
*timer_val = (unsigned long) __vmgettime();
return 0;
}
#endif
/*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#ifndef _ASM_TLB_H
#define _ASM_TLB_H
#include <linux/pagemap.h>
#include <asm/tlbflush.h>
/*
* We don't need any special per-pte or per-vma handling...
*/
#define tlb_start_vma(tlb, vma) do { } while (0)
#define tlb_end_vma(tlb, vma) do { } while (0)
#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
/*
* .. because we flush the whole mm when it fills up
*/
#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
#include <asm-generic/tlb.h>
#endif
/*
* TLB flush support for Hexagon
*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#ifndef _ASM_TLBFLUSH_H
#define _ASM_TLBFLUSH_H
#include <linux/mm.h>
#include <asm/processor.h>
/*
* TLB flushing -- in "SMP", these routines get defined to be the
* ones from smp.c, else they are some local flavors.
*/
/*
* These functions are commonly macros, but in the interests of
* VM vs. native implementation and code size, we simply declare
* the function prototypes here.
*/
extern void tlb_flush_all(void);
extern void flush_tlb_mm(struct mm_struct *mm);
extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr);
extern void flush_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
extern void flush_tlb_one(unsigned long);
/*
* "This is called in munmap when we have freed up some page-table pages.
* We don't need to do anything here..."
*
* The VM kernel doesn't walk page tables, and they are passed to the VMM
* by logical address. There doesn't seem to be any possibility that they
* could be referenced by the VM kernel based on a stale mapping, since
* they would only be located by consulting the mm structure, and they
* will have been purged from that structure by the munmap. Seems like
* a noop on HVM as well.
*/
#define flush_tlb_pgtables(mm, start, end)
#endif
/*
* Trap support for Hexagon
*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#ifndef _ASM_HEXAGON_TRAPS_H
#define _ASM_HEXAGON_TRAPS_H
#include <asm/registers.h>
extern int die(const char *str, struct pt_regs *regs, long err);
extern int die_if_kernel(char *str, struct pt_regs *regs, long err);
#endif /* _ASM_HEXAGON_TRAPS_H */
/*
* User memory access support for Hexagon
*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#ifndef _ASM_UACCESS_H
#define _ASM_UACCESS_H
/*
* User space memory access functions
*/
#include <linux/sched.h>
#include <linux/mm.h>
#include <asm/segment.h>
#include <asm/sections.h>
/*
* access_ok: - Checks if a user space pointer is valid
* @type: Type of access: %VERIFY_READ or %VERIFY_WRITE. Note that
* %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
* to write to a block, it is always safe to read from it.
* @addr: User space pointer to start of block to check
* @size: Size of block to check
*
* Context: User context only. This function may sleep.
*
* Checks if a pointer to a block of memory in user space is valid.
*
* Returns true (nonzero) if the memory block *may* be valid, false (zero)
* if it is definitely invalid.
*
* User address space in Hexagon, like x86, goes to 0xbfffffff, so the
* simple MSB-based tests used by MIPS won't work. Some further
* optimization is probably possible here, but for now, keep it
* reasonably simple and not *too* slow. After all, we've got the
* MMU for backup.
*/
#define VERIFY_READ 0
#define VERIFY_WRITE 1
#define __access_ok(addr, size) \
	((get_fs().seg == KERNEL_DS.seg) || \
	(((unsigned long)(addr) < get_fs().seg) && \
	(unsigned long)(size) < (get_fs().seg - (unsigned long)(addr))))
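/*
 * Worked example (editor's sketch, with hypothetical values): with a
 * user segment limit of 0xc0000000, checking addr = 0xbffffe00 with
 * size = 0x100 succeeds (0xbffffe00 < 0xc0000000 and 0x100 < 0x200),
 * while addr = 0xbfffff80 with size = 0x100 fails, since only 0x80
 * bytes remain below the limit.
 */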
/*
* When a kernel-mode page fault is taken, the faulting instruction
* address is checked against a table of exception_table_entries.
* Each entry is a tuple of the address of an instruction that may
* be authorized to fault, and the address at which execution should
* be resumed instead of the faulting instruction, so as to effect
* a workaround.
*/
/* Assembly somewhat optimized copy routines */
unsigned long __copy_from_user_hexagon(void *to, const void __user *from,
unsigned long n);
unsigned long __copy_to_user_hexagon(void __user *to, const void *from,
unsigned long n);
#define __copy_from_user(to, from, n) __copy_from_user_hexagon(to, from, n)
#define __copy_to_user(to, from, n) __copy_to_user_hexagon(to, from, n)
/*
 * XXX todo: some additional performance gain is possible by
* implementing __copy_to/from_user_inatomic, which is much
* like __copy_to/from_user, but performs slightly less checking.
*/
__kernel_size_t __clear_user_hexagon(void __user *dest, unsigned long count);
#define __clear_user(a, s) __clear_user_hexagon((a), (s))
#define __strncpy_from_user(dst, src, n) hexagon_strncpy_from_user(dst, src, n)
/* get around the ifndef in asm-generic/uaccess.h */
#define __strnlen_user __strnlen_user
extern long __strnlen_user(const char __user *src, long n);
static inline long hexagon_strncpy_from_user(char *dst, const char __user *src,
long n);
#include <asm-generic/uaccess.h>
/* Todo: an actual accelerated version of this. */
static inline long hexagon_strncpy_from_user(char *dst, const char __user *src,
long n)
{
long res = __strnlen_user(src, n);
	/* __strnlen_user() counts the terminating NUL, so it never returns
	   zero for a valid string. */
if (res > n) {
copy_from_user(dst, src, n);
return n;
} else {
copy_from_user(dst, src, res);
return res-1;
}
}
#endif
/*
* Syscall support for Hexagon
*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#if !defined(_ASM_HEXAGON_UNISTD_H) || defined(__SYSCALL)
#define _ASM_HEXAGON_UNISTD_H
/*
* The kernel pulls this unistd.h in three different ways:
* 1. the "normal" way which gets all the __NR defines
* 2. with __SYSCALL defined to produce function declarations
* 3. with __SYSCALL defined to produce syscall table initialization
* See also: syscalltab.c
*/
#define sys_mmap2 sys_mmap_pgoff
#include <asm-generic/unistd.h>
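/*
 * Sketch of mode 3 above (editor's illustration along the lines of
 * what syscalltab.c does; not the verbatim source):
 *
 *	#define __SYSCALL(nr, call) [nr] = (call),
 *	void *sys_call_table[__NR_syscalls] = {
 *	#include <asm/unistd.h>
 *	};
 *
 * Each __SYSCALL() line in asm-generic/unistd.h then expands to one
 * designated initializer of the table.
 */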
#endif
/*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#ifndef HEXAGON_ASM_USER_H
#define HEXAGON_ASM_USER_H
/*
* Layout for registers passed in elf core dumps to userspace.
*
* Basically a rearranged subset of "pt_regs".
*
* Interested parties: libc, gdb...
*/
struct user_regs_struct {
unsigned long r0;
unsigned long r1;
unsigned long r2;
unsigned long r3;
unsigned long r4;
unsigned long r5;
unsigned long r6;
unsigned long r7;
unsigned long r8;
unsigned long r9;
unsigned long r10;
unsigned long r11;
unsigned long r12;
unsigned long r13;
unsigned long r14;
unsigned long r15;
unsigned long r16;
unsigned long r17;
unsigned long r18;
unsigned long r19;
unsigned long r20;
unsigned long r21;
unsigned long r22;
unsigned long r23;
unsigned long r24;
unsigned long r25;
unsigned long r26;
unsigned long r27;
unsigned long r28;
unsigned long r29;
unsigned long r30;
unsigned long r31;
unsigned long sa0;
unsigned long lc0;
unsigned long sa1;
unsigned long lc1;
unsigned long m0;
unsigned long m1;
unsigned long usr;
unsigned long p3_0;
unsigned long gp;
unsigned long ugp;
unsigned long pc;
unsigned long cause;
unsigned long badva;
unsigned long pad1; /* pad out to 48 words total */
unsigned long pad2; /* pad out to 48 words total */
unsigned long pad3; /* pad out to 48 words total */
};
#endif
/*
* vDSO implementation for Hexagon
*
* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#ifndef __ASM_VDSO_H
#define __ASM_VDSO_H
#include <linux/types.h>
struct hexagon_vdso {
u32 rt_signal_trampoline[2];
};
#endif /* __ASM_VDSO_H */
/*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#ifndef _ASM_HEXAGON_VM_FAULT_H
#define _ASM_HEXAGON_VM_FAULT_H
extern void execute_protection_fault(struct pt_regs *);
extern void write_protection_fault(struct pt_regs *);
extern void read_protection_fault(struct pt_regs *);
#endif
/*
* Hexagon VM page table entry definitions
*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#ifndef _ASM_VM_MMU_H
#define _ASM_VM_MMU_H
/*
* Shift, mask, and other constants for the Hexagon Virtual Machine
* page tables.
*
* Virtual machine MMU allows first-level entries to either be
* single-level lookup PTEs for very large pages, or PDEs pointing
* to second-level PTEs for smaller pages. If PTE is single-level,
* the least significant bits cannot be used as software bits to encode
* virtual memory subsystem information about the page, and that state
* must be maintained in some parallel data structure.
*/
/* S or Page Size field in PDE */
#define __HVM_PDE_S (0x7 << 0)
#define __HVM_PDE_S_4KB 0
#define __HVM_PDE_S_16KB 1
#define __HVM_PDE_S_64KB 2
#define __HVM_PDE_S_256KB 3
#define __HVM_PDE_S_1MB 4
#define __HVM_PDE_S_4MB 5
#define __HVM_PDE_S_16MB 6
#define __HVM_PDE_S_INVALID 7
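/*
 * Decode sketch (editor's example; hypothetical helper, not part of
 * this header):
 *
 *	static inline unsigned int hvm_pde_size(unsigned long pde)
 *	{
 *		return pde & __HVM_PDE_S;	(0..7, per the table above)
 *	}
 */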
/* Masks for L2 page table pointer, as function of page size */
#define __HVM_PDE_PTMASK_4KB 0xfffff000
#define __HVM_PDE_PTMASK_16KB 0xfffffc00
#define __HVM_PDE_PTMASK_64KB 0xffffff00
#define __HVM_PDE_PTMASK_256KB 0xffffffc0
#define __HVM_PDE_PTMASK_1MB 0xfffffff0
/*
* Virtual Machine PTE Bits/Fields
*/
#define __HVM_PTE_T (1<<4)
#define __HVM_PTE_U (1<<5)
#define __HVM_PTE_C (0x7<<6)
#define __HVM_PTE_CVAL(pte) (((pte) & __HVM_PTE_C) >> 6)
#define __HVM_PTE_R (1<<9)
#define __HVM_PTE_W (1<<10)
#define __HVM_PTE_X (1<<11)
/*
* Cache Attributes, to be shifted as necessary for virtual/physical PTEs
*/
#define __HEXAGON_C_WB 0x0 /* Write-back, no L2 */
#define __HEXAGON_C_WT 0x1 /* Write-through, no L2 */
#define __HEXAGON_C_DEV 0x4 /* Device register space */
#define __HEXAGON_C_WT_L2 0x5 /* Write-through, with L2 */
/* This really should be conditional on the V2 architecture (something
   like "#if CONFIG_HEXAGON_ARCH == 2"), but no such symbol is defined */
#if defined(CONFIG_HEXAGON_COMET) || defined(CONFIG_QDSP6_ST1)
#define __HEXAGON_C_UNC __HEXAGON_C_DEV
#else
#define __HEXAGON_C_UNC 0x6 /* Uncached memory */
#endif
#define __HEXAGON_C_WB_L2 0x7 /* Write-back, with L2 */
/*
 * This can be overridden, but we're defaulting to the most aggressive
* cache policy, the better to find bugs sooner.
*/
#define CACHE_DEFAULT __HEXAGON_C_WB_L2
/* Masks for physical page address, as a function of page size */
#define __HVM_PTE_PGMASK_4KB 0xfffff000
#define __HVM_PTE_PGMASK_16KB 0xffffc000
#define __HVM_PTE_PGMASK_64KB 0xffff0000
#define __HVM_PTE_PGMASK_256KB 0xfffc0000
#define __HVM_PTE_PGMASK_1MB 0xfff00000
/* Masks for single-level large page lookups */
#define __HVM_PTE_PGMASK_4MB 0xffc00000
#define __HVM_PTE_PGMASK_16MB 0xff000000
/*
* "Big kernel page mappings" (see vm_init_segtable.S)
* are currently 16MB
*/
#define BIG_KERNEL_PAGE_SHIFT 24
#define BIG_KERNEL_PAGE_SIZE (1 << BIG_KERNEL_PAGE_SHIFT)
#endif /* _ASM_VM_MMU_H */
extra-y := head.o vmlinux.lds init_task.o
obj-$(CONFIG_SMP) += smp.o topology.o
obj-y += setup.o irq_cpu.o traps.o syscalltab.o signal.o time.o
obj-y += process.o syscall.o trampoline.o reset.o ptrace.o
obj-y += vdso.o
obj-$(CONFIG_KGDB) += kgdb.o
obj-$(CONFIG_MODULES) += module.o hexagon_ksyms.o
# Modules required to work with the Hexagon Virtual Machine
obj-y += vm_entry.o vm_events.o vm_switch.o vm_ops.o vm_init_segtable.o
obj-y += vm_vectors.o
obj-$(CONFIG_HAS_DMA) += dma.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o
/*
* Copyright (C) 1996 David S. Miller
* Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002, 2003 Ralf Baechle
* Copyright (C) 1999, 2000 Silicon Graphics, Inc.
* Kevin Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
* Copyright (C) 2000 MIPS Technologies, Inc.
*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#include <linux/compat.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/kbuild.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
/* This file is used to produce assembler/linker-script constants from
   header files typically used in C. Specifically, it generates
   asm-offsets.h */
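/* The generated asm-offsets.h is a list of plain defines consumable
   from assembly, e.g. (illustrative values only):
	#define _PAGE_SIZE 4096
	#define _PAGE_SHIFT 12
   so assembly sources share the same layout constants as C code. */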
int main(void)
{
COMMENT("This is a comment.");
/* might get these from somewhere else. */
DEFINE(_PAGE_SIZE, PAGE_SIZE);
DEFINE(_PAGE_SHIFT, PAGE_SHIFT);
BLANK();
COMMENT("Hexagon pt_regs definitions");
OFFSET(_PT_SYSCALL_NR, pt_regs, syscall_nr);
OFFSET(_PT_UGPGP, pt_regs, ugpgp);
OFFSET(_PT_R3130, pt_regs, r3130);
OFFSET(_PT_R2928, pt_regs, r2928);
OFFSET(_PT_R2726, pt_regs, r2726);
OFFSET(_PT_R2524, pt_regs, r2524);
OFFSET(_PT_R2322, pt_regs, r2322);
OFFSET(_PT_R2120, pt_regs, r2120);
OFFSET(_PT_R1918, pt_regs, r1918);
OFFSET(_PT_R1716, pt_regs, r1716);
OFFSET(_PT_R1514, pt_regs, r1514);
OFFSET(_PT_R1312, pt_regs, r1312);
OFFSET(_PT_R1110, pt_regs, r1110);
OFFSET(_PT_R0908, pt_regs, r0908);
OFFSET(_PT_R0706, pt_regs, r0706);
OFFSET(_PT_R0504, pt_regs, r0504);
OFFSET(_PT_R0302, pt_regs, r0302);
OFFSET(_PT_R0100, pt_regs, r0100);
OFFSET(_PT_LC0SA0, pt_regs, lc0sa0);
OFFSET(_PT_LC1SA1, pt_regs, lc1sa1);
OFFSET(_PT_M1M0, pt_regs, m1m0);
OFFSET(_PT_PREDSUSR, pt_regs, predsusr);
OFFSET(_PT_EVREC, pt_regs, hvmer);
OFFSET(_PT_ER_VMEL, pt_regs, hvmer.vmel);
OFFSET(_PT_ER_VMEST, pt_regs, hvmer.vmest);
OFFSET(_PT_ER_VMPSP, pt_regs, hvmer.vmpsp);
OFFSET(_PT_ER_VMBADVA, pt_regs, hvmer.vmbadva);
DEFINE(_PT_REGS_SIZE, sizeof(struct pt_regs));
BLANK();
COMMENT("Hexagon thread_info definitions");
OFFSET(_THREAD_INFO_FLAGS, thread_info, flags);
OFFSET(_THREAD_INFO_PT_REGS, thread_info, regs);
OFFSET(_THREAD_INFO_SP, thread_info, sp);
DEFINE(_THREAD_SIZE, THREAD_SIZE);
BLANK();
COMMENT("Hexagon hexagon_switch_stack definitions");
OFFSET(_SWITCH_R1716, hexagon_switch_stack, r1716);
OFFSET(_SWITCH_R1918, hexagon_switch_stack, r1918);
OFFSET(_SWITCH_R2120, hexagon_switch_stack, r2120);
OFFSET(_SWITCH_R2322, hexagon_switch_stack, r2322);
OFFSET(_SWITCH_R2524, hexagon_switch_stack, r2524);
OFFSET(_SWITCH_R2726, hexagon_switch_stack, r2726);
OFFSET(_SWITCH_FP, hexagon_switch_stack, fp);
OFFSET(_SWITCH_LR, hexagon_switch_stack, lr);
DEFINE(_SWITCH_STACK_SIZE, sizeof(struct hexagon_switch_stack));
BLANK();
COMMENT("Hexagon task_struct definitions");
OFFSET(_TASK_THREAD_INFO, task_struct, stack);
OFFSET(_TASK_STRUCT_THREAD, task_struct, thread);
COMMENT("Hexagon thread_struct definitions");
OFFSET(_THREAD_STRUCT_SWITCH_SP, thread_struct, switch_sp);
return 0;
}
/*
* DMA implementation for Hexagon
*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#include <linux/dma-mapping.h>
#include <linux/bootmem.h>
#include <linux/genalloc.h>
#include <asm/dma-mapping.h>
struct dma_map_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);
int bad_dma_address; /* globals are automatically initialized to zero */
int dma_supported(struct device *dev, u64 mask)
{
if (mask == DMA_BIT_MASK(32))
return 1;
else
return 0;
}
EXPORT_SYMBOL(dma_supported);
int dma_set_mask(struct device *dev, u64 mask)
{
if (!dev->dma_mask || !dma_supported(dev, mask))
return -EIO;
*dev->dma_mask = mask;
return 0;
}
EXPORT_SYMBOL(dma_set_mask);
static struct gen_pool *coherent_pool;
/* Allocates from a pool of uncached memory that was reserved at boot time */
void *hexagon_dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_addr, gfp_t flag)
{
void *ret;
if (coherent_pool == NULL) {
coherent_pool = gen_pool_create(PAGE_SHIFT, -1);
if (coherent_pool == NULL)
panic("Can't create %s() memory pool!", __func__);
else
gen_pool_add(coherent_pool,
(PAGE_OFFSET + (max_low_pfn << PAGE_SHIFT)),
hexagon_coherent_pool_size, -1);
}
ret = (void *) gen_pool_alloc(coherent_pool, size);
if (ret) {
memset(ret, 0, size);
*dma_addr = (dma_addr_t) (ret - PAGE_OFFSET);
} else
*dma_addr = ~0;
return ret;
}
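/*
 * Driver-side view (editor's note): consumers reach this through the
 * generic DMA API rather than calling it directly, e.g.:
 *
 *	dma_addr_t phys;
 *	void *va = dma_alloc_coherent(dev, size, &phys, GFP_KERNEL);
 *	...
 *	dma_free_coherent(dev, size, va, phys);
 */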
static void hexagon_free_coherent(struct device *dev, size_t size, void *vaddr,
dma_addr_t dma_addr)
{
gen_pool_free(coherent_pool, (unsigned long) vaddr, size);
}
static int check_addr(const char *name, struct device *hwdev,
dma_addr_t bus, size_t size)
{
if (hwdev && hwdev->dma_mask && !dma_capable(hwdev, bus, size)) {
if (*hwdev->dma_mask >= DMA_BIT_MASK(32))
printk(KERN_ERR
"%s: overflow %Lx+%zu of device mask %Lx\n",
name, (long long)bus, size,
(long long)*hwdev->dma_mask);
return 0;
}
return 1;
}
static int hexagon_map_sg(struct device *hwdev, struct scatterlist *sg,
int nents, enum dma_data_direction dir,
struct dma_attrs *attrs)
{
struct scatterlist *s;
int i;
WARN_ON(nents == 0 || sg[0].length == 0);
for_each_sg(sg, s, nents, i) {
s->dma_address = sg_phys(s);
if (!check_addr("map_sg", hwdev, s->dma_address, s->length))
return 0;
s->dma_length = s->length;
flush_dcache_range(PAGE_OFFSET + s->dma_address,
PAGE_OFFSET + s->dma_address + s->length);
}
return nents;
}
/*
* address is virtual
*/
static inline void dma_sync(void *addr, size_t size,
enum dma_data_direction dir)
{
switch (dir) {
case DMA_TO_DEVICE:
hexagon_clean_dcache_range((unsigned long) addr,
(unsigned long) addr + size);
break;
case DMA_FROM_DEVICE:
hexagon_inv_dcache_range((unsigned long) addr,
(unsigned long) addr + size);
break;
case DMA_BIDIRECTIONAL:
flush_dcache_range((unsigned long) addr,
(unsigned long) addr + size);
break;
default:
BUG();
}
}
static inline void *dma_addr_to_virt(dma_addr_t dma_addr)
{
return phys_to_virt((unsigned long) dma_addr);
}
/**
* hexagon_map_page() - maps an address for device DMA
* @dev: pointer to DMA device
* @page: pointer to page struct of DMA memory
* @offset: offset within page
* @size: size of memory to map
* @dir: transfer direction
* @attrs: pointer to DMA attrs (not used)
*
* Called to map a memory address to a DMA address prior
* to accesses to/from device.
*
* We don't particularly have many hoops to jump through
* so far. Straight translation between phys and virtual.
*
* DMA is not cache coherent so sync is necessary; this
* seems to be a convenient place to do it.
*
*/
static dma_addr_t hexagon_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir,
struct dma_attrs *attrs)
{
dma_addr_t bus = page_to_phys(page) + offset;
WARN_ON(size == 0);
if (!check_addr("map_single", dev, bus, size))
return bad_dma_address;
dma_sync(dma_addr_to_virt(bus), size, dir);
return bus;
}
static void hexagon_sync_single_for_cpu(struct device *dev,
dma_addr_t dma_handle, size_t size,
enum dma_data_direction dir)
{
dma_sync(dma_addr_to_virt(dma_handle), size, dir);
}
static void hexagon_sync_single_for_device(struct device *dev,
dma_addr_t dma_handle, size_t size,
enum dma_data_direction dir)
{
dma_sync(dma_addr_to_virt(dma_handle), size, dir);
}
struct dma_map_ops hexagon_dma_ops = {
.alloc_coherent = hexagon_dma_alloc_coherent,
.free_coherent = hexagon_free_coherent,
.map_sg = hexagon_map_sg,
.map_page = hexagon_map_page,
.sync_single_for_cpu = hexagon_sync_single_for_cpu,
.sync_single_for_device = hexagon_sync_single_for_device,
.is_phys = 1,
};
void __init hexagon_dma_init(void)
{
if (dma_ops)
return;
dma_ops = &hexagon_dma_ops;
}
/*
* Early kernel startup code for Hexagon
*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
 *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/asm-offsets.h>
#include <asm/mem-layout.h>
#include <asm/vm_mmu.h>
#include <asm/page.h>
__INIT
ENTRY(stext)
/*
* VMM will already have set up true vector page, MMU, etc.
* To set up initial kernel identity map, we have to pass
* the VMM a pointer to some canonical page tables. In
* this implementation, we're assuming that we've got
* them precompiled. Generate value in R24, as we'll need
* it again shortly.
*/
r24.L = #LO(swapper_pg_dir)
r24.H = #HI(swapper_pg_dir)
/*
* Symbol is kernel segment address, but we need
* the logical/physical address.
*/
r24 = asl(r24, #2)
r24 = lsr(r24, #2)
r0 = r24
/*
* Initialize a 16MB PTE to make the virtual and physical
* addresses where the kernel was loaded be identical.
*/
#define PTE_BITS ( __HVM_PTE_R | __HVM_PTE_W | __HVM_PTE_X \
| __HEXAGON_C_WB_L2 << 6 \
| __HVM_PDE_S_4MB)
r1 = pc
r2.H = #0xffc0
r2.L = #0x0000
r1 = and(r1,r2) /* round PC to 4MB boundary */
r2 = lsr(r1, #22) /* 4MB page number */
r2 = asl(r2, #2) /* times sizeof(PTE) (4bytes) */
r0 = add(r0,r2) /* r0 = address of correct PTE */
r2 = #PTE_BITS
r1 = add(r1,r2) /* r1 = 4MB PTE for the first entry */
r2.h = #0x0040
r2.l = #0x0000 /* 4MB */
memw(r0 ++ #4) = r1
r1 = add(r1, r2)
memw(r0 ++ #4) = r1
r0 = r24
/*
* The subroutine wrapper around the virtual instruction touches
* no memory, so we should be able to use it even here.
*/
call __vmnewmap;
/* Jump into virtual address range. */
r31.h = #hi(__head_s_vaddr_target)
r31.l = #lo(__head_s_vaddr_target)
jumpr r31
/* Insert trippy space effects. */
__head_s_vaddr_target:
/*
* Tear down VA=PA translation now that we are running
 * in the designated kernel segments.
*/
r0 = #__HVM_PDE_S_INVALID
r1 = r24
loop0(1f,#0x100)
1:
{
memw(R1 ++ #4) = R0
}:endloop0
r0 = r24
call __vmnewmap
/* Go ahead and install the trap0 return so angel calls work */
r0.h = #hi(_K_provisional_vec)
r0.l = #lo(_K_provisional_vec)
call __vmsetvec
/*
* OK, at this point we should start to be much more careful,
* we're going to enter C code and start touching memory
* in all sorts of places.
* This means:
* SGP needs to be OK
* Need to lock shared resources
* A bunch of other things that will cause
* all kinds of painful bugs
*/
	/*
	 * The stack pointer should point at the init task's thread
	 * stack, which should have been declared in arch/init_task.c.
	 * It's accessible via init_thread_union, a union of a
	 * thread_info struct and a stack; the stack grows down from
	 * the end of that union, i.e. from
	 * init_thread_union + THREAD_SIZE.
	 */
{r29.H = #HI(init_thread_union); r0.H = #HI(_THREAD_SIZE); }
{r29.L = #LO(init_thread_union); r0.L = #LO(_THREAD_SIZE); }
/* initialize the register used to point to current_thread_info */
	/* Fixme: THREADINFO_REG can't be R2; R2 is clobbered as an
	   argument register by the memset call below. */
{r29 = add(r29,r0); THREADINFO_REG = r29; }
/* Hack: zero bss; */
{ r0.L = #LO(__bss_start); r1 = #0; r2.l = #LO(__bss_stop); }
{ r0.H = #HI(__bss_start); r2.h = #HI(__bss_stop); }
r2 = sub(r2,r0);
call memset;
/* Time to make the doughnuts. */
call start_kernel
/*
* Should not reach here.
*/
1:
jump 1b
.p2align PAGE_SHIFT
ENTRY(external_cmdline_buffer)
.fill _PAGE_SIZE,1,0
.data
.p2align PAGE_SHIFT
ENTRY(empty_zero_page)
.fill _PAGE_SIZE,1,0
/*
* Export of symbols defined in assembly files and/or libgcc.
*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#include <asm/hexagon_vm.h>
#include <asm/uaccess.h>
EXPORT_SYMBOL(__copy_from_user_hexagon);
EXPORT_SYMBOL(__copy_to_user_hexagon);
EXPORT_SYMBOL(__vmgetie);
EXPORT_SYMBOL(__vmsetie);
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(memset);
#define DECLARE_EXPORT(name) \
extern void name(void); EXPORT_SYMBOL(name)
/* Symbols found in libgcc that assorted kernel modules need */
DECLARE_EXPORT(__hexagon_memcpy_likely_aligned_min32bytes_mult8bytes);
DECLARE_EXPORT(__hexagon_divsi3);
DECLARE_EXPORT(__hexagon_modsi3);
DECLARE_EXPORT(__hexagon_udivsi3);
DECLARE_EXPORT(__hexagon_umodsi3);
/*
* Init task definition
*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/init_task.h>
#include <linux/fs.h>
#include <linux/mqueue.h>
#include <asm/thread_info.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
/*
* Initial thread structure.
*
* We need to make sure that this is 8192-byte aligned due to the
* way process stacks are handled. This is done by making sure
* the linker maps this in the .text segment right after head.S,
* and making head.S ensure the proper alignment.
*/
union thread_union init_thread_union
__attribute__((__section__(".data.init_task"),
__aligned__(THREAD_SIZE))) = {
INIT_THREAD_INFO(init_task)
};
/*
* Initial task structure.
*
* All other task structs will be allocated on slabs in fork.c
*/
struct task_struct init_task = INIT_TASK(init_task);
EXPORT_SYMBOL(init_task);
/*
* First-level interrupt controller model for Hexagon.
*
* Copyright (c) 2010-2011 Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#include <linux/interrupt.h>
#include <asm/irq.h>
#include <asm/hexagon_vm.h>
static void mask_irq(struct irq_data *data)
{
__vmintop_locdis((long) data->irq);
}
static void mask_irq_num(unsigned int irq)
{
__vmintop_locdis((long) irq);
}
static void unmask_irq(struct irq_data *data)
{
__vmintop_locen((long) data->irq);
}
/* This is actually all we need for handle_fasteoi_irq */
static void eoi_irq(struct irq_data *data)
{
__vmintop_globen((long) data->irq);
}
/* Power management wake call. We don't actually need it, but if it
 * is absent, an -ENXIO error is returned to the msm_serial driver,
 * which then fails to initialize correctly. That is a bug in the
 * msm_serial driver; for now, work around it here by providing this
 * bogus handler.
 * XXX FIXME!!! remove this when msm_serial is fixed.
 */
static int set_wake(struct irq_data *data, unsigned int on)
{
return 0;
}
static struct irq_chip hexagon_irq_chip = {
.name = "HEXAGON",
.irq_mask = mask_irq,
.irq_unmask = unmask_irq,
.irq_set_wake = set_wake,
.irq_eoi = eoi_irq
};
/**
* The hexagon core comes with a first-level interrupt controller
* with 32 total possible interrupts. When the core is embedded
* into different systems/platforms, it is typically wrapped by
* macro cells that provide one or more second-level interrupt
* controllers that are cascaded into one or more of the first-level
* interrupts handled here. The precise wiring of these other
* irqs varies from platform to platform, and are set up & configured
* in the platform-specific files.
*
* The first-level interrupt controller is wrapped by the VM, which
* virtualizes the interrupt controller for us. It provides a very
* simple, fast & efficient API, and so the fasteoi handler is
* appropriate for this case.
*/
void __init init_IRQ(void)
{
int irq;
for (irq = 0; irq < HEXAGON_CPUINTS; irq++) {
mask_irq_num(irq);
irq_set_chip_and_handler(irq, &hexagon_irq_chip,
handle_fasteoi_irq);
}
}
/*
* arch/hexagon/kernel/kgdb.c - Hexagon KGDB Support
*
* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#include <linux/kdebug.h>
#include <linux/kgdb.h>
/* All registers are 4 bytes, for now */
#define GDB_SIZEOF_REG 4
/* The register names are used when printing the regs;
 * keep them at three letters to pretty-print. */
struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = {
{ " r0", GDB_SIZEOF_REG, offsetof(struct pt_regs, r00)},
{ " r1", GDB_SIZEOF_REG, offsetof(struct pt_regs, r01)},
{ " r2", GDB_SIZEOF_REG, offsetof(struct pt_regs, r02)},
{ " r3", GDB_SIZEOF_REG, offsetof(struct pt_regs, r03)},
{ " r4", GDB_SIZEOF_REG, offsetof(struct pt_regs, r04)},
{ " r5", GDB_SIZEOF_REG, offsetof(struct pt_regs, r05)},
{ " r6", GDB_SIZEOF_REG, offsetof(struct pt_regs, r06)},
{ " r7", GDB_SIZEOF_REG, offsetof(struct pt_regs, r07)},
{ " r8", GDB_SIZEOF_REG, offsetof(struct pt_regs, r08)},
{ " r9", GDB_SIZEOF_REG, offsetof(struct pt_regs, r09)},
{ "r10", GDB_SIZEOF_REG, offsetof(struct pt_regs, r10)},
{ "r11", GDB_SIZEOF_REG, offsetof(struct pt_regs, r11)},
{ "r12", GDB_SIZEOF_REG, offsetof(struct pt_regs, r12)},
{ "r13", GDB_SIZEOF_REG, offsetof(struct pt_regs, r13)},
{ "r14", GDB_SIZEOF_REG, offsetof(struct pt_regs, r14)},
{ "r15", GDB_SIZEOF_REG, offsetof(struct pt_regs, r15)},
{ "r16", GDB_SIZEOF_REG, offsetof(struct pt_regs, r16)},
{ "r17", GDB_SIZEOF_REG, offsetof(struct pt_regs, r17)},
{ "r18", GDB_SIZEOF_REG, offsetof(struct pt_regs, r18)},
{ "r19", GDB_SIZEOF_REG, offsetof(struct pt_regs, r19)},
{ "r20", GDB_SIZEOF_REG, offsetof(struct pt_regs, r20)},
{ "r21", GDB_SIZEOF_REG, offsetof(struct pt_regs, r21)},
{ "r22", GDB_SIZEOF_REG, offsetof(struct pt_regs, r22)},
{ "r23", GDB_SIZEOF_REG, offsetof(struct pt_regs, r23)},
{ "r24", GDB_SIZEOF_REG, offsetof(struct pt_regs, r24)},
{ "r25", GDB_SIZEOF_REG, offsetof(struct pt_regs, r25)},
{ "r26", GDB_SIZEOF_REG, offsetof(struct pt_regs, r26)},
{ "r27", GDB_SIZEOF_REG, offsetof(struct pt_regs, r27)},
{ "r28", GDB_SIZEOF_REG, offsetof(struct pt_regs, r28)},
{ "r29", GDB_SIZEOF_REG, offsetof(struct pt_regs, r29)},
{ "r30", GDB_SIZEOF_REG, offsetof(struct pt_regs, r30)},
{ "r31", GDB_SIZEOF_REG, offsetof(struct pt_regs, r31)},
{ "usr", GDB_SIZEOF_REG, offsetof(struct pt_regs, usr)},
{ "preds", GDB_SIZEOF_REG, offsetof(struct pt_regs, preds)},
{ " m0", GDB_SIZEOF_REG, offsetof(struct pt_regs, m0)},
{ " m1", GDB_SIZEOF_REG, offsetof(struct pt_regs, m1)},
{ "sa0", GDB_SIZEOF_REG, offsetof(struct pt_regs, sa0)},
{ "sa1", GDB_SIZEOF_REG, offsetof(struct pt_regs, sa1)},
{ "lc0", GDB_SIZEOF_REG, offsetof(struct pt_regs, lc0)},
{ "lc1", GDB_SIZEOF_REG, offsetof(struct pt_regs, lc1)},
{ " gp", GDB_SIZEOF_REG, offsetof(struct pt_regs, gp)},
{ "ugp", GDB_SIZEOF_REG, offsetof(struct pt_regs, ugp)},
{ "psp", GDB_SIZEOF_REG, offsetof(struct pt_regs, hvmer.vmpsp)},
{ "elr", GDB_SIZEOF_REG, offsetof(struct pt_regs, hvmer.vmel)},
{ "est", GDB_SIZEOF_REG, offsetof(struct pt_regs, hvmer.vmest)},
{ "badva", GDB_SIZEOF_REG, offsetof(struct pt_regs, hvmer.vmbadva)},
{ "restart_r0", GDB_SIZEOF_REG, offsetof(struct pt_regs, restart_r0)},
{ "syscall_nr", GDB_SIZEOF_REG, offsetof(struct pt_regs, syscall_nr)},
};
struct kgdb_arch arch_kgdb_ops = {
/* trap0(#0xDB) 0x0cdb0054 */
.gdb_bpt_instr = {0x54, 0x00, 0xdb, 0x0c},
};
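/*
* Note: the byte array above is the little-endian in-memory form of
* the 32-bit trap0(#0xDB) opcode 0x0cdb0054 that KGDB plants as its
* software breakpoint.
*/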
char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
{
if (regno >= DBG_MAX_REG_NUM || regno < 0)
return NULL;
*((unsigned long *) mem) = *((unsigned long *) ((void *)regs +
dbg_reg_def[regno].offset));
return dbg_reg_def[regno].name;
}
int dbg_set_reg(int regno, void *mem, struct pt_regs *regs)
{
if (regno >= DBG_MAX_REG_NUM || regno < 0)
return -EINVAL;
*((unsigned long *) ((void *)regs + dbg_reg_def[regno].offset)) =
*((unsigned long *) mem);
return 0;
}
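/*
* Illustrative sketch (not part of this file): the generic KGDB core
* walks dbg_reg_def[] through the two accessors above roughly as
* below when building its register packet; gdb_regs is assumed to be
* a NUMREGBYTES-sized buffer.
*/
#if 0
static void example_pt_regs_to_gdb_regs(unsigned long *gdb_regs,
struct pt_regs *regs)
{
int i;
char *ptr = (char *)gdb_regs;
/* Each slot is GDB_SIZEOF_REG bytes; dbg_get_reg() fills one. */
for (i = 0; i < DBG_MAX_REG_NUM; i++)
dbg_get_reg(i, ptr + i * GDB_SIZEOF_REG, regs);
}
#endif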
void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
{
instruction_pointer(regs) = pc;
}
#ifdef CONFIG_SMP
/**
* kgdb_roundup_cpus - Get other CPUs into a holding pattern
* @flags: Current IRQ state
*
* On SMP systems, we need to get the attention of the other CPUs
* and get them into a known state. This should do what is needed
* to get the other CPUs to call kgdb_wait(). Note that on some arches,
* the NMI approach is not used for rounding up all the CPUs. For example,
* in the case of MIPS, smp_call_function() is used to round up CPUs. In
* that case, we have to make sure that interrupts are enabled before
* calling smp_call_function(). The argument to this function is
* the flags that will be used when restoring interrupts; a
* local_irq_save() call is made before kgdb_roundup_cpus().
*
* On non-SMP systems, this is not called.
*/
static void hexagon_kgdb_nmi_hook(void *ignored)
{
kgdb_nmicallback(raw_smp_processor_id(), get_irq_regs());
}
void kgdb_roundup_cpus(unsigned long flags)
{
local_irq_enable();
smp_call_function(hexagon_kgdb_nmi_hook, NULL, 0);
local_irq_disable();
}
#endif
/* Not yet working */
void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs,
struct task_struct *task)
{
struct pt_regs *thread_regs;
if (task == NULL)
return;
/* Initialize to zero */
memset(gdb_regs, 0, NUMREGBYTES);
/* We have only the registers saved at switch_to() time */
thread_regs = task_pt_regs(task);
gdb_regs[0] = thread_regs->r00;
}
/**
* kgdb_arch_handle_exception - Handle architecture specific GDB packets.
* @vector: The error vector of the exception that happened.
* @signo: The signal number of the exception that happened.
* @err_code: The error code of the exception that happened.
* @remcom_in_buffer: The buffer of the packet we have read.
* @remcom_out_buffer: The buffer of %BUFMAX bytes to write a packet into.
* @regs: The &struct pt_regs of the current process.
*
* This function MUST handle the 'c' and 's' command packets,
* as well as packets to set / remove a hardware breakpoint, if used.
* If there are additional packets which the hardware needs to handle,
* they are handled here. The code should return -1 if it wants to
* process more packets, and a %0 or %1 if it wants to exit from the
* kgdb callback.
*
* Not yet working.
*/
int kgdb_arch_handle_exception(int vector, int signo, int err_code,
char *remcom_in_buffer, char *remcom_out_buffer,
struct pt_regs *linux_regs)
{
switch (remcom_in_buffer[0]) {
case 's':
case 'c':
return 0;
}
/* Stay in the debugger. */
return -1;
}
static int __kgdb_notify(struct die_args *args, unsigned long cmd)
{
/* cpu roundup */
if (atomic_read(&kgdb_active) != -1) {
kgdb_nmicallback(smp_processor_id(), args->regs);
return NOTIFY_STOP;
}
if (user_mode(args->regs))
return NOTIFY_DONE;
if (kgdb_handle_exception(args->trapnr & 0xff, args->signr, args->err,
args->regs))
return NOTIFY_DONE;
return NOTIFY_STOP;
}
static int
kgdb_notify(struct notifier_block *self, unsigned long cmd, void *ptr)
{
unsigned long flags;
int ret;
local_irq_save(flags);
ret = __kgdb_notify(ptr, cmd);
local_irq_restore(flags);
return ret;
}
static struct notifier_block kgdb_notifier = {
.notifier_call = kgdb_notify,
/*
* Lowest-prio notifier priority, we want to be notified last:
*/
.priority = -INT_MAX,
};
/**
* kgdb_arch_init - Perform any architecture specific initialization.
*
* This function will handle the initialization of any architecture
* specific callbacks.
*/
int kgdb_arch_init(void)
{
return register_die_notifier(&kgdb_notifier);
}
/**
* kgdb_arch_exit - Perform any architecture specific uninitialization.
*
* This function will handle the uninitialization of any architecture
* specific callbacks, for dynamic registration and unregistration.
*/
void kgdb_arch_exit(void)
{
unregister_die_notifier(&kgdb_notifier);
}
/*
* Kernel module loader for Hexagon
*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#include <asm/module.h>
#include <linux/elf.h>
#include <linux/module.h>
#include <linux/moduleloader.h>
#include <linux/vmalloc.h>
#if 0
#define DEBUGP printk
#else
#define DEBUGP(fmt, ...)
#endif
/*
* module_frob_arch_sections - tweak got/plt sections.
* @hdr - pointer to elf header
* @sechdrs - pointer to elf load section headers
* @secstrings - section name strings (section header string table)
* @mod - pointer to module
*/
int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
char *secstrings,
struct module *mod)
{
unsigned int i;
int found = 0;
/* Look for .plt and/or .got.plt and/or .rela.plt sections */
for (i = 0; i < hdr->e_shnum; i++) {
DEBUGP("Section %d is %s\n", i,
secstrings + sechdrs[i].sh_name);
if (strcmp(secstrings + sechdrs[i].sh_name, ".plt") == 0)
found = i+1;
if (strcmp(secstrings + sechdrs[i].sh_name, ".got.plt") == 0)
found = i+1;
if (strcmp(secstrings + sechdrs[i].sh_name, ".rela.plt") == 0)
found = i+1;
}
/* At this time, we don't support modules compiled with -shared */
if (found) {
printk(KERN_WARNING
"Module '%s' contains unexpected .plt/.got sections.\n",
mod->name);
/* return -ENOEXEC; */
}
return 0;
}
/*
* apply_relocate_add - perform rela relocations.
* @sechdrs - pointer to section headers
* @strtab - string table for symbol names
* @symindex - section index of the symbol table
* @relsec - section index of the relocation section to apply
* @module - pointer to module
*
* Perform rela relocations.
*/
int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
unsigned int symindex, unsigned int relsec,
struct module *module)
{
unsigned int i;
Elf32_Sym *sym;
uint32_t *location;
uint32_t value;
unsigned int nrelocs = sechdrs[relsec].sh_size / sizeof(Elf32_Rela);
Elf32_Rela *rela = (void *)sechdrs[relsec].sh_addr;
Elf32_Word sym_info = sechdrs[relsec].sh_info;
Elf32_Sym *sym_base = (Elf32_Sym *) sechdrs[symindex].sh_addr;
void *loc_base = (void *) sechdrs[sym_info].sh_addr;
DEBUGP("Applying relocations in section %u to section %u base=%p\n",
relsec, sym_info, loc_base);
for (i = 0; i < nrelocs; i++) {
/* Symbol to relocate */
sym = sym_base + ELF32_R_SYM(rela[i].r_info);
/* Where to make the change */
location = loc_base + rela[i].r_offset;
/* `Everything is relative'. */
value = sym->st_value + rela[i].r_addend;
DEBUGP("%d: value=%08x loc=%p reloc=%d symbol=%s\n",
i, value, location, ELF32_R_TYPE(rela[i].r_info),
sym->st_name ?
&strtab[sym->st_name] : "(anonymous)");
switch (ELF32_R_TYPE(rela[i].r_info)) {
case R_HEXAGON_B22_PCREL: {
int dist = (int)(value - (uint32_t)location);
if ((dist < -0x00800000) ||
(dist >= 0x00800000)) {
printk(KERN_ERR
"%s: %s: %08x=%08x-%08x %s\n",
module->name,
"R_HEXAGON_B22_PCREL reloc out of range",
dist, value, (uint32_t)location,
sym->st_name ?
&strtab[sym->st_name] : "(anonymous)");
return -ENOEXEC;
}
DEBUGP("B22_PCREL contents: %08X.\n", *location);
*location &= ~0x01ff3fff;
*location |= 0x00003fff & dist;
*location |= 0x01ff0000 & (dist<<2);
DEBUGP("Contents after reloc: %08x\n", *location);
break;
}
case R_HEXAGON_HI16:
value = (value>>16) & 0xffff;
/* fallthrough */
case R_HEXAGON_LO16:
*location &= ~0x00c03fff;
*location |= value & 0x3fff;
*location |= (value & 0xc000) << 8;
break;
case R_HEXAGON_32:
*location = value;
break;
case R_HEXAGON_32_PCREL:
*location = value - (uint32_t)location;
break;
case R_HEXAGON_PLT_B22_PCREL:
case R_HEXAGON_GOTOFF_LO16:
case R_HEXAGON_GOTOFF_HI16:
printk(KERN_ERR "%s: GOT/PLT relocations unsupported\n",
module->name);
return -ENOEXEC;
default:
printk(KERN_ERR "%s: unknown relocation: %u\n",
module->name,
ELF32_R_TYPE(rela[i].r_info));
return -ENOEXEC;
}
}
return 0;
}
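/*
* Illustrative sketch (not built): the R_HEXAGON_B22_PCREL case above
* splits the byte displacement across two instruction fields, as this
* helper spells out.
*/
#if 0
static uint32_t example_pack_b22_pcrel(uint32_t insn, int32_t dist)
{
insn &= ~0x01ff3fff; /* clear both immediate fields */
insn |= 0x00003fff & dist; /* dist[13:0] -> insn[13:0] */
insn |= 0x01ff0000 & (dist << 2); /* dist[22:14] -> insn[24:16] */
return insn;
}
#endif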
/*
* Process creation support for Hexagon
*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/tick.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
/*
* Kernel thread creation. The desired kernel function is "wrapped"
* in the kernel_thread_helper function, which does cleanup
* afterwards.
*/
static void __noreturn kernel_thread_helper(void *arg, int (*fn)(void *))
{
do_exit(fn(arg));
}
int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
struct pt_regs regs;
memset(&regs, 0, sizeof(regs));
/*
* Yes, we're exploiting illicit knowledge of the ABI here.
*/
regs.r00 = (unsigned long) arg;
regs.r01 = (unsigned long) fn;
pt_set_elr(&regs, (unsigned long)kernel_thread_helper);
pt_set_kmode(&regs);
return do_fork(flags|CLONE_VM|CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
}
EXPORT_SYMBOL(kernel_thread);
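/*
* Illustrative usage (hypothetical function names, not part of this
* file): a caller would spawn a kernel thread roughly as below; fn
* receives arg per the ABI packing above, and its return value
* becomes the do_exit() code.
*/
#if 0
static int example_thread_fn(void *arg)
{
/* ... do work ... */
return 0;
}
static void example_spawn(void)
{
kernel_thread(example_thread_fn, NULL, CLONE_FS | CLONE_FILES);
}
#endif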
/*
* Program thread launch. Often defined as a macro in processor.h,
* but we're shooting for a small footprint and it's not an inner-loop
* performance-critical operation.
*
* The Hexagon ABI specifies that R28 is zeroed before program launch,
* so that gets automatically done here. If we ever stop doing that here,
* we'll probably want to define the ELF_PLAT_INIT macro.
*/
void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp)
{
/* Set to run with user-mode data segmentation */
set_fs(USER_DS);
/* We want to zero all data-containing registers. Is this overkill? */
memset(regs, 0, sizeof(*regs));
/* We might want to also zero all Processor registers here */
pt_set_usermode(regs);
pt_set_elr(regs, pc);
pt_set_rte_sp(regs, sp);
}
/*
* Spin, or better still, execute a hardware or VM wait instruction,
* provided the hardware or VM offers wait termination even while
* interrupts are disabled.
*/
static void default_idle(void)
{
__vmwait();
}
void (*idle_sleep)(void) = default_idle;
void cpu_idle(void)
{
while (1) {
tick_nohz_stop_sched_tick(1);
local_irq_disable();
while (!need_resched()) {
idle_sleep();
/* interrupts wake us up, but aren't serviced */
local_irq_enable(); /* service interrupt */
local_irq_disable();
}
local_irq_enable();
tick_nohz_restart_sched_tick();
schedule();
}
}
/*
* Return saved PC of a blocked thread
*/
unsigned long thread_saved_pc(struct task_struct *tsk)
{
return 0;
}
/*
* Copy architecture-specific thread state
*/
int copy_thread(unsigned long clone_flags, unsigned long usp,
unsigned long unused, struct task_struct *p,
struct pt_regs *regs)
{
struct thread_info *ti = task_thread_info(p);
struct hexagon_switch_stack *ss;
struct pt_regs *childregs;
asmlinkage void ret_from_fork(void);
childregs = (struct pt_regs *) (((unsigned long) ti + THREAD_SIZE) -
sizeof(*childregs));
memcpy(childregs, regs, sizeof(*childregs));
ti->regs = childregs;
/*
* Establish kernel stack pointer and initial PC for new thread
*/
ss = (struct hexagon_switch_stack *) ((unsigned long) childregs -
sizeof(*ss));
ss->lr = (unsigned long)ret_from_fork;
p->thread.switch_sp = ss;
/* If User mode thread, set pt_reg stack pointer as per parameter */
if (user_mode(childregs)) {
pt_set_rte_sp(childregs, usp);
/* Child sees zero return value */
childregs->r00 = 0;
/*
* The clone syscall has the C signature:
* int [r0] clone(int flags [r0],
* void *child_frame [r1],
* void *parent_tid [r2],
* void *child_tid [r3],
* void *thread_control_block [r4]);
* ugp is used to provide TLS support.
*/
if (clone_flags & CLONE_SETTLS)
childregs->ugp = childregs->r04;
/*
* Parent sees new pid -- not necessary, not even possible at
* this point in the fork process
* Might also want to set things like ti->addr_limit
*/
} else {
/*
* If kernel thread, resume stack is kernel stack base.
* Note that this is pointer arithmetic on pt_regs *
*/
pt_set_rte_sp(childregs, (unsigned long)(childregs + 1));
/*
* We need the current thread_info fast path pointer
* set up in pt_regs. The register to be used is
* parametric for assembler code, but the mechanism
* doesn't drop neatly into C. Needs to be fixed.
*/
childregs->THREADINFO_REG = (unsigned long) ti;
}
/*
* thread_info pointer is pulled out of task_struct "stack"
* field on switch_to.
*/
p->stack = (void *)ti;
return 0;
}
/*
* Release any architecture-specific resources locked by thread
*/
void release_thread(struct task_struct *dead_task)
{
}
/*
* Free any architecture-specific thread data structures, etc.
*/
void exit_thread(void)
{
}
/*
* Some archs flush debug and FPU info here
*/
void flush_thread(void)
{
}
/*
* The "wait channel" terminology is archaic, but what we want
* is an identification of the point at which the scheduler
* was invoked by a blocked thread.
*/
unsigned long get_wchan(struct task_struct *p)
{
unsigned long fp, pc;
unsigned long stack_page;
int count = 0;
if (!p || p == current || p->state == TASK_RUNNING)
return 0;
stack_page = (unsigned long)task_stack_page(p);
fp = ((struct hexagon_switch_stack *)p->thread.switch_sp)->fp;
do {
if (fp < (stack_page + sizeof(struct thread_info)) ||
fp >= (THREAD_SIZE - 8 + stack_page))
return 0;
pc = ((unsigned long *)fp)[1];
if (!in_sched_functions(pc))
return pc;
fp = *(unsigned long *) fp;
} while (count++ < 16);
return 0;
}
/*
* Borrowed from PowerPC -- basically allow smaller kernel stacks if we
* go crazy with the page sizes.
*/
#if THREAD_SHIFT < PAGE_SHIFT
static struct kmem_cache *thread_info_cache;
struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node)
{
struct thread_info *ti;
ti = kmem_cache_alloc_node(thread_info_cache, GFP_KERNEL, node);
if (unlikely(ti == NULL))
return NULL;
#ifdef CONFIG_DEBUG_STACK_USAGE
memset(ti, 0, THREAD_SIZE);
#endif
return ti;
}
void free_thread_info(struct thread_info *ti)
{
kmem_cache_free(thread_info_cache, ti);
}
/* Weak symbol; called by init/main.c */
void thread_info_cache_init(void)
{
thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
THREAD_SIZE, 0, NULL);
BUG_ON(thread_info_cache == NULL);
}
#endif /* THREAD_SHIFT < PAGE_SHIFT */
/*
* Required placeholder.
*/
int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
{
return 0;
}
/*
* Ptrace support for Hexagon
*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#include <generated/compile.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/regset.h>
#include <linux/user.h>
#include <asm/system.h>
#include <asm/user.h>
static int genregs_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
int ret;
unsigned int dummy;
struct pt_regs *regs = task_pt_regs(target);
if (!regs)
return -EIO;
/* The general idea here is that the copyout must happen in
* exactly the same order in which the userspace expects these
* regs. Now, the sequence in userspace does not match the
* sequence in the kernel, so everything past the 32 gprs
* happens one at a time.
*/
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&regs->r00, 0, 32*sizeof(unsigned long));
#define ONEXT(KPT_REG, USR_REG) \
if (!ret) \
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, \
KPT_REG, offsetof(struct user_regs_struct, USR_REG), \
offsetof(struct user_regs_struct, USR_REG) + \
sizeof(unsigned long));
/* Must be exactly same sequence as struct user_regs_struct */
ONEXT(&regs->sa0, sa0);
ONEXT(&regs->lc0, lc0);
ONEXT(&regs->sa1, sa1);
ONEXT(&regs->lc1, lc1);
ONEXT(&regs->m0, m0);
ONEXT(&regs->m1, m1);
ONEXT(&regs->usr, usr);
ONEXT(&regs->preds, p3_0);
ONEXT(&regs->gp, gp);
ONEXT(&regs->ugp, ugp);
ONEXT(&pt_elr(regs), pc);
dummy = pt_cause(regs);
ONEXT(&dummy, cause);
ONEXT(&pt_badva(regs), badva);
/* Pad the rest with zeros, if needed */
if (!ret)
ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
offsetof(struct user_regs_struct, pad1), -1);
return ret;
}
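/*
* Illustrative expansion (for reference only): one ONEXT() use above,
* ONEXT(&regs->sa0, sa0), becomes the guarded copyout below, moving a
* single word at its user_regs_struct offset.
*/
#if 0
if (!ret)
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&regs->sa0, offsetof(struct user_regs_struct, sa0),
offsetof(struct user_regs_struct, sa0) +
sizeof(unsigned long));
#endif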
static int genregs_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
int ret;
unsigned long bucket;
struct pt_regs *regs = task_pt_regs(target);
if (!regs)
return -EIO;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&regs->r00, 0, 32*sizeof(unsigned long));
#define INEXT(KPT_REG, USR_REG) \
if (!ret) \
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, \
KPT_REG, offsetof(struct user_regs_struct, USR_REG), \
offsetof(struct user_regs_struct, USR_REG) + \
sizeof(unsigned long));
/* Must be exactly same sequence as struct user_regs_struct */
INEXT(&regs->sa0, sa0);
INEXT(&regs->lc0, lc0);
INEXT(&regs->sa1, sa1);
INEXT(&regs->lc1, lc1);
INEXT(&regs->m0, m0);
INEXT(&regs->m1, m1);
INEXT(&regs->usr, usr);
INEXT(&regs->preds, p3_0);
INEXT(&regs->gp, gp);
INEXT(&regs->ugp, ugp);
INEXT(&pt_elr(regs), pc);
/* CAUSE and BADVA aren't writeable. */
INEXT(&bucket, cause);
INEXT(&bucket, badva);
/* Ignore the rest, if needed */
if (!ret)
ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
offsetof(struct user_regs_struct, pad1), -1);
if (ret)
return ret;
/*
* This is special; SP is actually restored by the VM via the
* special event record which is set by the special trap.
*/
regs->hvmer.vmpsp = regs->r29;
return 0;
}
enum hexagon_regset {
REGSET_GENERAL,
};
static const struct user_regset hexagon_regsets[] = {
[REGSET_GENERAL] = {
.core_note_type = NT_PRSTATUS,
.n = ELF_NGREG,
.size = sizeof(unsigned long),
.align = sizeof(unsigned long),
.get = genregs_get,
.set = genregs_set,
},
};
static const struct user_regset_view hexagon_user_view = {
.name = UTS_MACHINE,
.e_machine = ELF_ARCH,
.ei_osabi = ELF_OSABI,
.regsets = hexagon_regsets,
.n = ARRAY_SIZE(hexagon_regsets)
};
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
return &hexagon_user_view;
}
void ptrace_disable(struct task_struct *child)
{
/* Boilerplate - resolves to null inline if no HW single-step */
user_disable_single_step(child);
}
long arch_ptrace(struct task_struct *child, long request,
unsigned long addr, unsigned long data)
{
return ptrace_request(child, request, addr, data);
}
/*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#include <linux/smp.h>
#include <asm/hexagon_vm.h>
void machine_power_off(void)
{
smp_send_stop();
__vmstop();
}
void machine_halt(void)
{
}
void machine_restart(char *cmd)
{
}
void pm_power_off(void)
{
}
/*
* Arch related setup for Hexagon
*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mmzone.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/console.h>
#include <linux/of_fdt.h>
#include <asm/io.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/processor.h>
#include <asm/hexagon_vm.h>
#include <asm/vm_mmu.h>
#include <asm/time.h>
#ifdef CONFIG_OF
#include <asm/prom.h>
#endif
char cmd_line[COMMAND_LINE_SIZE];
static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
int on_simulator;
void __cpuinit calibrate_delay(void)
{
loops_per_jiffy = thread_freq_mhz * 1000000 / HZ;
}
/*
* setup_arch - high level architectural setup routine
* @cmdline_p: pointer to pointer to command-line arguments
*/
void __init setup_arch(char **cmdline_p)
{
char *p = &external_cmdline_buffer;
/*
* These will eventually be pulled in via either some hypervisor
* or devicetree description. Hardwiring for now.
*/
pcycle_freq_mhz = 600;
thread_freq_mhz = 100;
sleep_clk_freq = 32000;
/*
* Set up event bindings to handle exceptions and interrupts.
*/
__vmsetvec(_K_VM_event_vector);
/*
* Simulator has a few differences from the hardware.
* For now, check uninitialized-but-mapped memory
* prior to invoking setup_arch_memory().
*/
if (*(int *)((unsigned long)_end + 8) == 0x1f1f1f1f)
on_simulator = 1;
else
on_simulator = 0;
if (p[0] != '\0')
strlcpy(boot_command_line, p, COMMAND_LINE_SIZE);
else
strlcpy(boot_command_line, default_command_line,
COMMAND_LINE_SIZE);
/*
* boot_command_line and the value set up by setup_arch
* are both picked up by the init code. If no reason to
* make them different, pass the same pointer back.
*/
strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
*cmdline_p = cmd_line;
parse_early_param();
setup_arch_memory();
#ifdef CONFIG_SMP
smp_start_cpus();
#endif
}
/*
* Functions for dumping CPU info via /proc
* Probably should move to kernel/proc.c or something.
*/
static void *c_start(struct seq_file *m, loff_t *pos)
{
return *pos < nr_cpu_ids ? (void *)((unsigned long) *pos + 1) : NULL;
}
static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
++*pos;
return c_start(m, pos);
}
static void c_stop(struct seq_file *m, void *v)
{
}
/*
* Eventually this will dump information about
* CPU properties like ISA level, TLB size, etc.
*/
static int show_cpuinfo(struct seq_file *m, void *v)
{
int cpu = (unsigned long) v - 1;
seq_printf(m, "processor\t: %d\n", cpu);
seq_printf(m, "model name\t: Hexagon Virtual Machine\n");
seq_printf(m, "BogoMips\t: %lu.%02lu\n",
(loops_per_jiffy * HZ) / 500000,
((loops_per_jiffy * HZ) / 5000) % 100);
seq_printf(m, "\n");
return 0;
}
const struct seq_operations cpuinfo_op = {
.start = &c_start,
.next = &c_next,
.stop = &c_stop,
.show = &show_cpuinfo,
};
/*
* Signal support for Hexagon processor
*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#include <linux/linkage.h>
#include <linux/syscalls.h>
#include <linux/freezer.h>
#include <linux/tracehook.h>
#include <asm/registers.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
#include <asm/uaccess.h>
#include <asm/ucontext.h>
#include <asm/cacheflush.h>
#include <asm/signal.h>
#include <asm/vdso.h>
#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
struct rt_sigframe {
unsigned long tramp[2];
struct siginfo info;
struct ucontext uc;
};
static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
size_t frame_size)
{
unsigned long sp = regs->r29;
/* Switch to signal stack if appropriate */
if ((ka->sa.sa_flags & SA_ONSTACK) && (sas_ss_flags(sp) == 0))
sp = current->sas_ss_sp + current->sas_ss_size;
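/* Round down so the frame stays 8-byte (long long) aligned. */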
return (void __user *)((sp - frame_size) & ~(sizeof(long long) - 1));
}
static int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
{
unsigned long tmp;
int err = 0;
err |= copy_to_user(&sc->sc_regs.r0, &regs->r00,
32*sizeof(unsigned long));
err |= __put_user(regs->sa0, &sc->sc_regs.sa0);
err |= __put_user(regs->lc0, &sc->sc_regs.lc0);
err |= __put_user(regs->sa1, &sc->sc_regs.sa1);
err |= __put_user(regs->lc1, &sc->sc_regs.lc1);
err |= __put_user(regs->m0, &sc->sc_regs.m0);
err |= __put_user(regs->m1, &sc->sc_regs.m1);
err |= __put_user(regs->usr, &sc->sc_regs.usr);
err |= __put_user(regs->preds, &sc->sc_regs.p3_0);
err |= __put_user(regs->gp, &sc->sc_regs.gp);
err |= __put_user(regs->ugp, &sc->sc_regs.ugp);
tmp = pt_elr(regs); err |= __put_user(tmp, &sc->sc_regs.pc);
tmp = pt_cause(regs); err |= __put_user(tmp, &sc->sc_regs.cause);
tmp = pt_badva(regs); err |= __put_user(tmp, &sc->sc_regs.badva);
return err;
}
static int restore_sigcontext(struct pt_regs *regs,
struct sigcontext __user *sc)
{
unsigned long tmp;
int err = 0;
err |= copy_from_user(&regs->r00, &sc->sc_regs.r0,
32 * sizeof(unsigned long));
err |= __get_user(regs->sa0, &sc->sc_regs.sa0);
err |= __get_user(regs->lc0, &sc->sc_regs.lc0);
err |= __get_user(regs->sa1, &sc->sc_regs.sa1);
err |= __get_user(regs->lc1, &sc->sc_regs.lc1);
err |= __get_user(regs->m0, &sc->sc_regs.m0);
err |= __get_user(regs->m1, &sc->sc_regs.m1);
err |= __get_user(regs->usr, &sc->sc_regs.usr);
err |= __get_user(regs->preds, &sc->sc_regs.p3_0);
err |= __get_user(regs->gp, &sc->sc_regs.gp);
err |= __get_user(regs->ugp, &sc->sc_regs.ugp);
err |= __get_user(tmp, &sc->sc_regs.pc); pt_set_elr(regs, tmp);
return err;
}
/*
* Setup signal stack frame with siginfo structure
*/
static int setup_rt_frame(int signr, struct k_sigaction *ka, siginfo_t *info,
sigset_t *set, struct pt_regs *regs)
{
int err = 0;
struct rt_sigframe __user *frame;
struct hexagon_vdso *vdso = current->mm->context.vdso;
frame = get_sigframe(ka, regs, sizeof(struct rt_sigframe));
if (!access_ok(VERIFY_WRITE, frame, sizeof(struct rt_sigframe)))
goto sigsegv;
if (copy_siginfo_to_user(&frame->info, info))
goto sigsegv;
/* The on-stack signal trampoline is no longer executed;
* however, the libgcc signal frame unwinding code checks for
* the presence of these two numeric magic values.
*/
err |= __put_user(0x7800d166, &frame->tramp[0]);
err |= __put_user(0x5400c004, &frame->tramp[1]);
err |= setup_sigcontext(regs, &frame->uc.uc_mcontext);
err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
if (err)
goto sigsegv;
/* Load r0/r1 pair with signumber/siginfo pointer... */
regs->r0100 = ((unsigned long long)((unsigned long)&frame->info) << 32)
| (unsigned long long)signr;
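/*
* Note: r0100 aliases the r01:r00 register pair (r00 in the low half,
* per the packing above), so this single 64-bit store leaves signr in
* r0 and &frame->info in r1 for the handler's (sig, info, ...) call
* convention.
*/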
regs->r02 = (unsigned long) &frame->uc;
regs->r31 = (unsigned long) vdso->rt_signal_trampoline;
pt_psp(regs) = (unsigned long) frame;
pt_set_elr(regs, (unsigned long)ka->sa.sa_handler);
return 0;
sigsegv:
force_sigsegv(signr, current);
return -EFAULT;
}
/*
* Setup invocation of signal handler
*/
static int handle_signal(int sig, siginfo_t *info, struct k_sigaction *ka,
sigset_t *oldset, struct pt_regs *regs)
{
int rc;
/*
* If we're handling a signal that aborted a system call,
* set up the error return value before adding the signal
* frame to the stack.
*/
if (regs->syscall_nr >= 0) {
switch (regs->r00) {
case -ERESTART_RESTARTBLOCK:
case -ERESTARTNOHAND:
regs->r00 = -EINTR;
break;
case -ERESTARTSYS:
if (!(ka->sa.sa_flags & SA_RESTART)) {
regs->r00 = -EINTR;
break;
}
/* Fall through */
case -ERESTARTNOINTR:
regs->r06 = regs->syscall_nr;
pt_set_elr(regs, pt_elr(regs) - 4);
regs->r00 = regs->restart_r0;
break;
default:
break;
}
}
/*
* Set up the stack frame. We don't distinguish on SA_SIGINFO here;
* the rt_frame flavor is the only one we set up.
*/
rc = setup_rt_frame(sig, ka, info, oldset, regs);
/* If there was an error on setup, no signal was delivered. */
if (rc)
return rc;
spin_lock_irq(&current->sighand->siglock);
sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask);
if (!(ka->sa.sa_flags & SA_NODEFER))
sigaddset(&current->blocked, sig);
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
return 0;
}
/*
* Called from return-from-event code.
*/
static void do_signal(struct pt_regs *regs)
{
struct k_sigaction sigact;
siginfo_t info;
int signo;
if (!user_mode(regs))
return;
if (try_to_freeze())
goto no_signal;
signo = get_signal_to_deliver(&info, &sigact, regs, NULL);
if (signo > 0) {
sigset_t *oldset;
if (test_thread_flag(TIF_RESTORE_SIGMASK))
oldset = &current->saved_sigmask;
else
oldset = &current->blocked;
if (handle_signal(signo, &info, &sigact, oldset, regs) == 0) {
/*
* Successful delivery case. The saved sigmask is
* stored in the signal frame, and will be restored
* by sigreturn. We can clear the TIF flag.
*/
clear_thread_flag(TIF_RESTORE_SIGMASK);
tracehook_signal_handler(signo, &info, &sigact, regs,
test_thread_flag(TIF_SINGLESTEP));
}
return;
}
no_signal:
/*
* If we came from a system call, handle the restart.
*/
if (regs->syscall_nr >= 0) {
switch (regs->r00) {
case -ERESTARTNOHAND:
case -ERESTARTSYS:
case -ERESTARTNOINTR:
regs->r06 = regs->syscall_nr;
break;
case -ERESTART_RESTARTBLOCK:
regs->r06 = __NR_restart_syscall;
break;
default:
goto no_restart;
}
pt_set_elr(regs, pt_elr(regs) - 4);
regs->r00 = regs->restart_r0;
}
no_restart:
/* If there's no signal to deliver, put the saved sigmask back */
if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
clear_thread_flag(TIF_RESTORE_SIGMASK);
sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
}
}
void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags)
{
if (thread_info_flags & _TIF_SIGPENDING)
do_signal(regs);
if (thread_info_flags & _TIF_NOTIFY_RESUME) {
clear_thread_flag(TIF_NOTIFY_RESUME);
if (current->replacement_session_keyring)
key_replace_session_keyring();
}
}
/*
* Architecture-specific wrappers for signal-related system calls
*/
asmlinkage int sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss)
{
struct pt_regs *regs = current_thread_info()->regs;
return do_sigaltstack(uss, uoss, regs->r29);
}
asmlinkage int sys_rt_sigreturn(void)
{
struct pt_regs *regs = current_thread_info()->regs;
struct rt_sigframe __user *frame;
sigset_t blocked;
frame = (struct rt_sigframe __user *)pt_psp(regs);
if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
goto badframe;
if (__copy_from_user(&blocked, &frame->uc.uc_sigmask, sizeof(blocked)))
goto badframe;
sigdelsetmask(&blocked, ~_BLOCKABLE);
spin_lock_irq(&current->sighand->siglock);
current->blocked = blocked;
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
if (restore_sigcontext(regs, &frame->uc.uc_mcontext))
goto badframe;
/* Restore the user's stack as well */
pt_psp(regs) = regs->r29;
/*
* Leave a trace in the stack frame that this was a sigreturn.
* If the system call is to replay, we've already restored the
* number in the GPR slot and it will be regenerated on the
* new system call trap entry. Note that if restore_sigcontext()
* did something other than a bulk copy of the pt_regs struct,
* we could avoid this assignment by simply not overwriting
* regs->syscall_nr.
*/
regs->syscall_nr = __NR_rt_sigreturn;
/*
* If we were meticulous, we'd only call this if we knew that
* we were actually going to use an alternate stack, and we'd
* consider any error to be fatal. What we do here, in common
* with many other architectures, is call it blindly and only
* consider the -EFAULT return case to be proof of a problem.
*/
if (do_sigaltstack(&frame->uc.uc_stack, NULL, pt_psp(regs)) == -EFAULT)
goto badframe;
return 0;
badframe:
force_sig(SIGSEGV, current);
return 0;
}
/*
* SMP support for Hexagon
*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <asm/system.h> /* xchg */
#include <asm/time.h> /* timer_interrupt */
#include <asm/hexagon_vm.h>
#define BASE_IPI_IRQ 26
/*
* cpu_possible_map needs to be filled out prior to setup_per_cpu_areas
* (which runs before any of our smp_prepare_cpu work), in order to set
* up the per_cpu areas.
*/
struct ipi_data {
unsigned long bits;
};
static DEFINE_PER_CPU(struct ipi_data, ipi_data);
static inline void __handle_ipi(unsigned long *ops, struct ipi_data *ipi,
int cpu)
{
unsigned long msg = 0;
do {
msg = find_next_bit(ops, BITS_PER_LONG, msg+1);
switch (msg) {
case IPI_TIMER:
ipi_timer();
break;
case IPI_CALL_FUNC:
generic_smp_call_function_interrupt();
break;
case IPI_CALL_FUNC_SINGLE:
generic_smp_call_function_single_interrupt();
break;
case IPI_CPU_STOP:
/*
* call vmstop()
*/
__vmstop();
break;
case IPI_RESCHEDULE:
scheduler_ipi();
break;
}
} while (msg < BITS_PER_LONG);
}
/* Used for IPI calls from other CPUs to unmask an interrupt */
void smp_vm_unmask_irq(void *info)
{
__vmintop_locen((long) info);
}
/*
* This is based on Alpha's IPI stuff.
* Supposed to take (int, void*) as args now.
* Specifically, first arg is irq, second is the irq_desc.
*/
irqreturn_t handle_ipi(int irq, void *desc)
{
int cpu = smp_processor_id();
struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
unsigned long ops;
while ((ops = xchg(&ipi->bits, 0)) != 0)
__handle_ipi(&ops, ipi, cpu);
return IRQ_HANDLED;
}
void send_ipi(const struct cpumask *cpumask, enum ipi_message_type msg)
{
unsigned long flags;
unsigned long cpu;
unsigned long retval;
local_irq_save(flags);
for_each_cpu(cpu, cpumask) {
struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
set_bit(msg, &ipi->bits);
/* Possible barrier here */
retval = __vmintop_post(BASE_IPI_IRQ+cpu);
if (retval != 0) {
printk(KERN_ERR "interrupt %ld not configured?\n",
BASE_IPI_IRQ+cpu);
}
}
local_irq_restore(flags);
}
static struct irqaction ipi_intdesc = {
.handler = handle_ipi,
.flags = IRQF_TRIGGER_RISING,
.name = "ipi_handler"
};
void __init smp_prepare_boot_cpu(void)
{
}
/*
* Interrupts should already be disabled by the VM, and SP should
* already be correct; we need to set THREADINFO_REG to point to the
* current thread_info.
*/
void __cpuinit start_secondary(void)
{
unsigned int cpu;
unsigned long thread_ptr;
/* Calculate thread_info pointer from stack pointer */
__asm__ __volatile__(
"%0 = SP;\n"
: "=r" (thread_ptr)
);
thread_ptr = thread_ptr & ~(THREAD_SIZE-1);
__asm__ __volatile__(
QUOTED_THREADINFO_REG " = %0;\n"
:
: "r" (thread_ptr)
);
/* Set the memory struct */
atomic_inc(&init_mm.mm_count);
current->active_mm = &init_mm;
cpu = smp_processor_id();
setup_irq(BASE_IPI_IRQ + cpu, &ipi_intdesc);
/* Register the clock_event dummy */
setup_percpu_clockdev();
printk(KERN_INFO "%s cpu %d\n", __func__, current_thread_info()->cpu);
set_cpu_online(cpu, true);
while (!cpumask_test_cpu(cpu, cpu_active_mask))
cpu_relax();
local_irq_enable();
cpu_idle();
}
/*
* Called once for each present CPU: starts the CPU up and
* then spins until "cpu_online(cpu)" is set.
*/
int __cpuinit __cpu_up(unsigned int cpu)
{
struct task_struct *idle;
struct thread_info *thread;
void *stack_start;
/* Create new init task for the CPU */
idle = fork_idle(cpu);
if (IS_ERR(idle))
panic("fork_idle failed\n");
thread = (struct thread_info *)idle->stack;
thread->cpu = cpu;
/* Boot to the head. */
stack_start = ((void *) thread) + THREAD_SIZE;
__vmstart(start_secondary, stack_start);
while (!cpu_isset(cpu, cpu_online_map))
barrier();
return 0;
}
void __init smp_cpus_done(unsigned int max_cpus)
{
}
void __init smp_prepare_cpus(unsigned int max_cpus)
{
int i;
/*
* should eventually have some sort of machine
* descriptor that has this stuff
*/
/* Right now, let's just fake it. */
for (i = 0; i < max_cpus; i++)
cpu_set(i, cpu_present_map);
/* Also need to register the interrupts for IPI */
if (max_cpus > 1)
setup_irq(BASE_IPI_IRQ, &ipi_intdesc);
}
void smp_send_reschedule(int cpu)
{
send_ipi(cpumask_of(cpu), IPI_RESCHEDULE);
}
void smp_send_stop(void)
{
struct cpumask targets;
cpumask_copy(&targets, cpu_online_mask);
cpumask_clear_cpu(smp_processor_id(), &targets);
send_ipi(&targets, IPI_CPU_STOP);
}
void arch_send_call_function_single_ipi(int cpu)
{
send_ipi(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
}
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
send_ipi(mask, IPI_CALL_FUNC);
}
int setup_profiling_timer(unsigned int multiplier)
{
return -EINVAL;
}
void smp_start_cpus(void)
{
int i;
for (i = 0; i < NR_CPUS; i++)
cpu_set(i, cpu_possible_map);
}
/*
* Stacktrace support for Hexagon
*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#include <linux/sched.h>
#include <linux/stacktrace.h>
#include <linux/thread_info.h>
#include <linux/module.h>
register unsigned long current_frame_pointer asm("r30");
struct stackframe {
unsigned long fp;
unsigned long rets;
};
/*
* Save stack-backtrace addresses into a stack_trace buffer.
*/
void save_stack_trace(struct stack_trace *trace)
{
unsigned long low, high;
unsigned long fp;
struct stackframe *frame;
int skip = trace->skip;
low = (unsigned long)task_stack_page(current);
high = low + THREAD_SIZE;
fp = current_frame_pointer;
while (fp >= low && fp <= (high - sizeof(*frame))) {
frame = (struct stackframe *)fp;
if (skip) {
skip--;
} else {
trace->entries[trace->nr_entries++] = frame->rets;
if (trace->nr_entries >= trace->max_entries)
break;
}
/*
* The next frame must be at a higher address than the
* current frame.
*/
low = fp + sizeof(*frame);
fp = frame->fp;
}
}
EXPORT_SYMBOL_GPL(save_stack_trace);
/*
* Hexagon system calls
*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/linkage.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/unistd.h>
#include <asm/mman.h>
#include <asm/registers.h>
/*
* System calls with architecture-specific wrappers.
* See signal.c for signal-related system call wrappers.
*/
asmlinkage int sys_execve(char __user *ufilename,
const char __user *const __user *argv,
const char __user *const __user *envp)
{
struct pt_regs *pregs = current_thread_info()->regs;
char *filename;
int retval;
filename = getname(ufilename);
retval = PTR_ERR(filename);
if (IS_ERR(filename))
return retval;
retval = do_execve(filename, argv, envp, pregs);
putname(filename);
return retval;
}
asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp,
unsigned long parent_tidp, unsigned long child_tidp)
{
struct pt_regs *pregs = current_thread_info()->regs;
if (!newsp)
newsp = pregs->SP;
return do_fork(clone_flags, newsp, pregs, 0, (int __user *)parent_tidp,
(int __user *)child_tidp);
}
/*
* Do a system call from the kernel, so as to have a proper pt_regs
* and recycle the sys_execve infrastructure.
*/
int kernel_execve(const char *filename,
const char *const argv[], const char *const envp[])
{
register unsigned long __a0 asm("r0") = (unsigned long) filename;
register unsigned long __a1 asm("r1") = (unsigned long) argv;
register unsigned long __a2 asm("r2") = (unsigned long) envp;
int retval;
__asm__ volatile(
" R6 = #%4;\n"
" trap0(#1);\n"
" %0 = R0;\n"
: "=r" (retval)
: "r" (__a0), "r" (__a1), "r" (__a2), "i" (__NR_execve)
);
return retval;
}
EXPORT_SYMBOL(kernel_execve);
/*
* System call table for Hexagon
*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/unistd.h>
#include <asm/syscall.h>
#undef __SYSCALL
#define __SYSCALL(nr, call) [nr] = (call),
void *sys_call_table[__NR_syscalls] = {
#include <asm/unistd.h>
};
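/*
* Illustrative expansion (for reference only): with the __SYSCALL
* definition above, an <asm/unistd.h> entry such as
* __SYSCALL(__NR_read, sys_read) becomes the designated initializer
* "[__NR_read] = (sys_read)," so including the header populates
* sys_call_table by syscall number.
*/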
/*
* Time related functions for Hexagon architecture
*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#include <linux/init.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/interrupt.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/ioport.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <asm/timer-regs.h>
#include <asm/hexagon_vm.h>
/*
* For the clocksource we need:
* pcycle frequency (600MHz)
* For the loops_per_jiffy we need:
* thread/cpu frequency (100MHz)
* And for the timer, we need:
* sleep clock rate
*/
cycles_t pcycle_freq_mhz;
cycles_t thread_freq_mhz;
cycles_t sleep_clk_freq;
static struct resource rtos_timer_resources[] = {
{
.start = RTOS_TIMER_REGS_ADDR,
.end = RTOS_TIMER_REGS_ADDR+PAGE_SIZE-1,
.flags = IORESOURCE_MEM,
},
};
static struct platform_device rtos_timer_device = {
.name = "rtos_timer",
.id = -1,
.num_resources = ARRAY_SIZE(rtos_timer_resources),
.resource = rtos_timer_resources,
};
/* A lot of this stuff should move into a platform specific section. */
struct adsp_hw_timer_struct {
u32 match; /* Match value */
u32 count;
u32 enable; /* [1] - CLR_ON_MATCH_EN, [0] - EN */
u32 clear; /* one-shot register that clears the count */
};
/* Look for "TCX0" for related constants. */
static struct adsp_hw_timer_struct __iomem *rtos_timer;
static cycle_t timer_get_cycles(struct clocksource *cs)
{
return (cycle_t) __vmgettime();
}
static struct clocksource hexagon_clocksource = {
.name = "pcycles",
.rating = 250,
.read = timer_get_cycles,
.mask = CLOCKSOURCE_MASK(64),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
static int set_next_event(unsigned long delta, struct clock_event_device *evt)
{
/* Assuming the timer will be disabled when we enter here. */
iowrite32(1, &rtos_timer->clear);
iowrite32(0, &rtos_timer->clear);
iowrite32(delta, &rtos_timer->match);
iowrite32(1 << TIMER_ENABLE, &rtos_timer->enable);
return 0;
}
/*
* Sets the mode (periodic, shutdown, oneshot, etc) of a timer.
*/
static void set_mode(enum clock_event_mode mode,
struct clock_event_device *evt)
{
switch (mode) {
case CLOCK_EVT_MODE_SHUTDOWN:
/* XXX implement me */
default:
break;
}
}
#ifdef CONFIG_SMP
/* Broadcast mechanism */
static void broadcast(const struct cpumask *mask)
{
send_ipi(mask, IPI_TIMER);
}
#endif
static struct clock_event_device hexagon_clockevent_dev = {
.name = "clockevent",
.features = CLOCK_EVT_FEAT_ONESHOT,
.rating = 400,
.irq = RTOS_TIMER_INT,
.set_next_event = set_next_event,
.set_mode = set_mode,
#ifdef CONFIG_SMP
.broadcast = broadcast,
#endif
};
#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct clock_event_device, clock_events);
void setup_percpu_clockdev(void)
{
int cpu = smp_processor_id();
struct clock_event_device *ce_dev = &hexagon_clockevent_dev;
struct clock_event_device *dummy_clock_dev =
&per_cpu(clock_events, cpu);
memcpy(dummy_clock_dev, ce_dev, sizeof(*dummy_clock_dev));
INIT_LIST_HEAD(&dummy_clock_dev->list);
dummy_clock_dev->features = CLOCK_EVT_FEAT_DUMMY;
dummy_clock_dev->cpumask = cpumask_of(cpu);
dummy_clock_dev->mode = CLOCK_EVT_MODE_UNUSED;
clockevents_register_device(dummy_clock_dev);
}
/* Called from smp.c for each CPU's timer ipi call */
void ipi_timer(void)
{
int cpu = smp_processor_id();
struct clock_event_device *ce_dev = &per_cpu(clock_events, cpu);
ce_dev->event_handler(ce_dev);
}
#endif /* CONFIG_SMP */
static irqreturn_t timer_interrupt(int irq, void *devid)
{
struct clock_event_device *ce_dev = &hexagon_clockevent_dev;
iowrite32(0, &rtos_timer->enable);
ce_dev->event_handler(ce_dev);
return IRQ_HANDLED;
}
/* This should also be pulled from devtree */
static struct irqaction rtos_timer_intdesc = {
.handler = timer_interrupt,
.flags = IRQF_TIMER | IRQF_TRIGGER_RISING,
.name = "rtos_timer"
};
/*
* time_init_deferred - called by start_kernel to set up timer/clock source
*
* Install the IRQ handler for the clock and set up the timers.
* This is done late so that ioremap() can be used.
*
* This runs just before the delay loop is calibrated, and
* is used for delay calibration.
*/
void __init time_init_deferred(void)
{
struct resource *resource = NULL;
struct clock_event_device *ce_dev = &hexagon_clockevent_dev;
struct device_node *dn;
struct resource r;
int err;
ce_dev->cpumask = cpu_all_mask;
if (!resource)
resource = rtos_timer_device.resource;
/* ioremap here means this has to run later, after paging init */
rtos_timer = ioremap(resource->start, resource->end
- resource->start + 1);
if (!rtos_timer) {
release_mem_region(resource->start, resource->end
- resource->start + 1);
}
clocksource_register_khz(&hexagon_clocksource, pcycle_freq_mhz * 1000);
/* Note: the sim generic RTOS clock is apparently really 18750Hz */
/*
* Last arg is some guaranteed seconds for which the conversion will
* work without overflow.
*/
clockevents_calc_mult_shift(ce_dev, sleep_clk_freq, 4);
ce_dev->max_delta_ns = clockevent_delta2ns(0x7fffffff, ce_dev);
ce_dev->min_delta_ns = clockevent_delta2ns(0xf, ce_dev);
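/*
* Illustrative arithmetic, assuming the sleep_clk_freq = 32000
* hardwired in setup_arch(): one tick is 10^9/32000 ~= 31250 ns, so
* min_delta (0xf ticks) is ~469 us and max_delta (0x7fffffff ticks)
* is on the order of 67,000 seconds.
*/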
#ifdef CONFIG_SMP
setup_percpu_clockdev();
#endif
clockevents_register_device(ce_dev);
setup_irq(ce_dev->irq, &rtos_timer_intdesc);
}
void __init time_init(void)
{
late_time_init = time_init_deferred;
}
/*
* This could become parametric or perhaps even computed at run-time,
* but for now we take the observed simulator jitter.
*/
static long long fudgefactor = 350; /* Maybe lower if kernel optimized. */
void __udelay(unsigned long usecs)
{
unsigned long long start = __vmgettime();
unsigned long long finish = (pcycle_freq_mhz * usecs) - fudgefactor;
while ((__vmgettime() - start) < finish)
cpu_relax(); /* not sure how this improves readability */
}
EXPORT_SYMBOL(__udelay);
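/*
* Illustrative arithmetic for __udelay(), assuming the hardwired
* pcycle_freq_mhz = 600 from setup_arch(): __udelay(10) spins until
* 600 * 10 - 350 = 5650 pcycles have elapsed, the fudge factor
* absorbing the call and setup overhead observed on the simulator.
*/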
/*
* CPU topology for Hexagon
*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/node.h>
#include <linux/nodemask.h>
#include <linux/percpu.h>
/* Swiped from MIPS. */
static DEFINE_PER_CPU(struct cpu, cpu_devices);
static int __init topology_init(void)
{
int i, ret;
for_each_present_cpu(i) {
/*
* register_cpu takes a per_cpu pointer and
* just points it at another per_cpu struct...
*/
ret = register_cpu(&per_cpu(cpu_devices, i), i);
if (ret)
printk(KERN_WARNING "topology_init: register_cpu %d "
"failed (%d)\n", i, ret);
}
return 0;
}
subsys_initcall(topology_init);
/*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
/*
* Trampoline sequences to be copied onto user stack.
* This consumes a little more space than hand-assembling
* immediate constants for use in C, but is more portable
* to future tweaks to the Hexagon instruction set.
*/
#include <asm/unistd.h>
/* Sig trampolines - call sys_sigreturn or sys_rt_sigreturn as appropriate */
/* plain sigreturn is gone. */
.globl __rt_sigtramp_template
__rt_sigtramp_template:
r6 = #__NR_rt_sigreturn;
trap0(#1);
/*
* Kernel traps/events for Hexagon processor
*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/kdebug.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/tracehook.h>
#include <asm/traps.h>
#include <asm/vm_fault.h>
#include <asm/syscall.h>
#include <asm/registers.h>
#include <asm/unistd.h>
#include <asm/sections.h>
#ifdef CONFIG_KGDB
# include <linux/kgdb.h>
#endif
#define TRAP_SYSCALL 1
#define TRAP_DEBUG 0xdb
void __init trap_init(void)
{
}
#ifdef CONFIG_GENERIC_BUG
/* Maybe should resemble arch/sh/kernel/traps.c ? */
int is_valid_bugaddr(unsigned long addr)
{
return 1;
}
#endif /* CONFIG_GENERIC_BUG */
static const char *ex_name(int ex)
{
switch (ex) {
case HVM_GE_C_XPROT:
case HVM_GE_C_XUSER:
return "Execute protection fault";
case HVM_GE_C_RPROT:
case HVM_GE_C_RUSER:
return "Read protection fault";
case HVM_GE_C_WPROT:
case HVM_GE_C_WUSER:
return "Write protection fault";
case HVM_GE_C_XMAL:
return "Misaligned instruction";
case HVM_GE_C_RMAL:
return "Misaligned data load";
case HVM_GE_C_WMAL:
return "Misaligned data store";
case HVM_GE_C_INVI:
case HVM_GE_C_PRIVI:
return "Illegal instruction";
case HVM_GE_C_BUS:
return "Precise bus error";
case HVM_GE_C_CACHE:
return "Cache error";
case TRAP_DEBUG:
return "Debugger trap";
default:
return "Unrecognized exception";
}
}
static void do_show_stack(struct task_struct *task, unsigned long *fp,
unsigned long ip)
{
int kstack_depth_to_print = 24;
unsigned long offset, size;
const char *name = NULL;
unsigned long *newfp;
unsigned long low, high;
char tmpstr[128];
char *modname;
int i;
if (task == NULL)
task = current;
printk(KERN_INFO "CPU#%d, %s/%d, Call Trace:\n",
raw_smp_processor_id(), task->comm,
task_pid_nr(task));
if (fp == NULL) {
if (task == current) {
asm("%0 = r30" : "=r" (fp));
} else {
fp = (unsigned long *)
((struct hexagon_switch_stack *)
task->thread.switch_sp)->fp;
}
}
if ((((unsigned long) fp) & 0x3) || ((unsigned long) fp < 0x1000)) {
printk(KERN_INFO "-- Corrupt frame pointer %p\n", fp);
return;
}
/* Saved link reg is one word above FP */
if (!ip)
ip = *(fp+1);
/* Expect kernel stack to be in-bounds */
low = (unsigned long)task_stack_page(task);
high = low + THREAD_SIZE - 8;
low += sizeof(struct thread_info);
for (i = 0; i < kstack_depth_to_print; i++) {
name = kallsyms_lookup(ip, &size, &offset, &modname, tmpstr);
printk(KERN_INFO "[%p] 0x%lx: %s + 0x%lx", fp, ip, name,
offset);
if (((unsigned long) fp < low) || (high < (unsigned long) fp))
printk(KERN_CONT " (FP out of bounds!)");
if (modname)
printk(KERN_CONT " [%s] ", modname);
printk(KERN_CONT "\n");
newfp = (unsigned long *) *fp;
if (((unsigned long) newfp) & 0x3) {
printk(KERN_INFO "-- Corrupt frame pointer %p\n",
newfp);
break;
}
/* Attempt to continue past exception. */
if (0 == newfp) {
struct pt_regs *regs = (struct pt_regs *) (((void *)fp)
+ 8);
if (regs->syscall_nr != -1) {
printk(KERN_INFO "-- trap0 -- syscall_nr: %ld",
regs->syscall_nr);
printk(KERN_CONT " psp: %lx elr: %lx\n",
pt_psp(regs), pt_elr(regs));
break;
} else {
/* really want to see more ... */
kstack_depth_to_print += 6;
printk(KERN_INFO "-- %s (0x%lx) badva: %lx\n",
ex_name(pt_cause(regs)), pt_cause(regs),
pt_badva(regs));
}
newfp = (unsigned long *) regs->r30;
ip = pt_elr(regs);
} else {
ip = *(newfp + 1);
}
/* If link reg is null, we are done. */
if (ip == 0x0)
break;
/* If newfp isn't larger, we're tracing garbage. */
if (newfp > fp)
fp = newfp;
else
break;
}
}
void show_stack(struct task_struct *task, unsigned long *fp)
{
/* Saved link reg is one word above FP */
do_show_stack(task, fp, 0);
}
void dump_stack(void)
{
unsigned long *fp;
asm("%0 = r30" : "=r" (fp));
show_stack(current, fp);
}
EXPORT_SYMBOL(dump_stack);
int die(const char *str, struct pt_regs *regs, long err)
{
static struct {
spinlock_t lock;
int counter;
} die = {
.lock = __SPIN_LOCK_UNLOCKED(die.lock),
.counter = 0
};
console_verbose();
oops_enter();
spin_lock_irq(&die.lock);
bust_spinlocks(1);
printk(KERN_EMERG "Oops: %s[#%d]:\n", str, ++die.counter);
if (notify_die(DIE_OOPS, str, regs, err, pt_cause(regs), SIGSEGV) ==
NOTIFY_STOP)
return 1;
print_modules();
show_regs(regs);
do_show_stack(current, &regs->r30, pt_elr(regs));
bust_spinlocks(0);
add_taint(TAINT_DIE);
spin_unlock_irq(&die.lock);
if (in_interrupt())
panic("Fatal exception in interrupt");
if (panic_on_oops)
panic("Fatal exception");
oops_exit();
do_exit(err);
return 0;
}
int die_if_kernel(char *str, struct pt_regs *regs, long err)
{
if (!user_mode(regs))
return die(str, regs, err);
else
return 0;
}
/*
* It's not clear that misaligned fetches are ever recoverable.
*/
static void misaligned_instruction(struct pt_regs *regs)
{
die_if_kernel("Misaligned Instruction", regs, 0);
force_sig(SIGBUS, current);
}
/*
* Misaligned loads and stores, on the other hand, can be
* emulated, and probably should be, some day. But for now
* they will be considered fatal.
*/
static void misaligned_data_load(struct pt_regs *regs)
{
die_if_kernel("Misaligned Data Load", regs, 0);
force_sig(SIGBUS, current);
}
static void misaligned_data_store(struct pt_regs *regs)
{
die_if_kernel("Misaligned Data Store", regs, 0);
force_sig(SIGBUS, current);
}
static void illegal_instruction(struct pt_regs *regs)
{
die_if_kernel("Illegal Instruction", regs, 0);
force_sig(SIGILL, current);
}
/*
* Precise bus errors may be recoverable with a retry,
* but for now, treat them as irrecoverable.
*/
static void precise_bus_error(struct pt_regs *regs)
{
die_if_kernel("Precise Bus Error", regs, 0);
force_sig(SIGBUS, current);
}
/*
* If anything is to be done here other than panic,
* it will probably be complex and migrate to another
* source module. For now, just die.
*/
static void cache_error(struct pt_regs *regs)
{
die("Cache Error", regs, 0);
}
/*
* General exception handler
*/
void do_genex(struct pt_regs *regs)
{
/*
* Decode Cause and Dispatch
*/
switch (pt_cause(regs)) {
case HVM_GE_C_XPROT:
case HVM_GE_C_XUSER:
execute_protection_fault(regs);
break;
case HVM_GE_C_RPROT:
case HVM_GE_C_RUSER:
read_protection_fault(regs);
break;
case HVM_GE_C_WPROT:
case HVM_GE_C_WUSER:
write_protection_fault(regs);
break;
case HVM_GE_C_XMAL:
misaligned_instruction(regs);
break;
case HVM_GE_C_RMAL:
misaligned_data_load(regs);
break;
case HVM_GE_C_WMAL:
misaligned_data_store(regs);
break;
case HVM_GE_C_INVI:
case HVM_GE_C_PRIVI:
illegal_instruction(regs);
break;
case HVM_GE_C_BUS:
precise_bus_error(regs);
break;
case HVM_GE_C_CACHE:
cache_error(regs);
break;
default:
/* Halt and catch fire */
panic("Unrecognized exception 0x%lx\n", pt_cause(regs));
break;
}
}
/* Indirect system call dispatch */
long sys_syscall(void)
{
printk(KERN_ERR "sys_syscall invoked!\n");
return -ENOSYS;
}
void do_trap0(struct pt_regs *regs)
{
unsigned long syscallret = 0;
syscall_fn syscall;
switch (pt_cause(regs)) {
case TRAP_SYSCALL:
/* System call is trap0 #1 */
/* allow strace to catch syscall args */
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACE) &&
tracehook_report_syscall_entry(regs)))
return; /* return -ENOSYS somewhere? */
/* Interrupts should be re-enabled for syscall processing */
__vmsetie(VM_INT_ENABLE);
/*
* System call number is in r6, arguments in r0..r5.
* Fortunately, no Linux syscall has more than 6 arguments,
* and the Hexagon ABI passes the first 6 arguments in registers.
* 64-bit arguments are passed in odd/even register pairs.
* We also have no system calls that take more
* than three arguments with more than one 64-bit value.
* Should that change, we'd need to redesign to copy
* between user and kernel stacks.
*/
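/*
 * A sketch (not part of this patch) of what a minimal userspace
 * invocation looks like under this convention, using the same
 * trap0(#1) idiom as the signal trampoline above; the syscall
 * number and arguments here are purely illustrative:
 *
 *	r6 = #__NR_write;	// syscall number
 *	r0 = #1;		// fd
 *	r1 = ##buf;		// buffer address (hypothetical symbol)
 *	r2 = #16;		// length (illustrative)
 *	trap0(#1);		// result comes back in r0
 */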
regs->syscall_nr = regs->r06;
/*
* GPR R0 carries the first parameter, and is also used
* to report the return value. We need a backup of
* the user's value in case we need to do a late restart
* of the system call.
*/
regs->restart_r0 = regs->r00;
if ((unsigned long) regs->syscall_nr >= __NR_syscalls) {
regs->r00 = -1;
} else {
syscall = (syscall_fn)
(sys_call_table[regs->syscall_nr]);
syscallret = syscall(regs->r00, regs->r01,
regs->r02, regs->r03,
regs->r04, regs->r05);
}
/*
* If it was a sigreturn system call, don't overwrite
* r0 value in stack frame with return value.
*
* __NR_sigreturn doesn't seem to exist in new unistd.h
*/
if (regs->syscall_nr != __NR_rt_sigreturn)
regs->r00 = syscallret;
/* allow strace to get the syscall return state */
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACE)))
tracehook_report_syscall_exit(regs, 0);
break;
case TRAP_DEBUG:
/* Trap0 0xdb is debug breakpoint */
if (user_mode(regs)) {
struct siginfo info;
info.si_signo = SIGTRAP;
info.si_errno = 0;
/*
* Some architectures add some per-thread state
* to distinguish between breakpoint traps and
* trace traps. We may want to do that, and
* set the si_code value appropriately, or we
* may want to use a different trap0 flavor.
*/
info.si_code = TRAP_BRKPT;
info.si_addr = (void __user *) pt_elr(regs);
send_sig_info(SIGTRAP, &info, current);
} else {
#ifdef CONFIG_KGDB
kgdb_handle_exception(pt_cause(regs), SIGTRAP,
TRAP_BRKPT, regs);
#endif
}
break;
}
/* Ignore other trap0 codes for now, especially 0 (Angel calls) */
}
/*
* Machine check exception handler
*/
void do_machcheck(struct pt_regs *regs)
{
/* Halt and catch fire */
__vmstop();
}
/*
* vDSO implementation for Hexagon
*
* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <asm/vdso.h>
static struct page *vdso_page;
/* Create a vDSO page holding the signal trampoline.
* We want this for a non-executable stack.
*/
static int __init vdso_init(void)
{
struct hexagon_vdso *vdso;
vdso_page = alloc_page(GFP_KERNEL);
if (!vdso_page)
panic("Cannot allocate vdso");
vdso = vmap(&vdso_page, 1, 0, PAGE_KERNEL);
if (!vdso)
panic("Cannot map vdso");
clear_page(vdso);
/* Install the signal trampoline; currently looks like this:
* r6 = #__NR_rt_sigreturn;
* trap0(#1);
*/
vdso->rt_signal_trampoline[0] = __rt_sigtramp_template[0];
vdso->rt_signal_trampoline[1] = __rt_sigtramp_template[1];
vunmap(vdso);
return 0;
}
arch_initcall(vdso_init);
/*
* Called from binfmt_elf. Create a VMA for the vDSO page.
*/
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
int ret;
unsigned long vdso_base;
struct mm_struct *mm = current->mm;
down_write(&mm->mmap_sem);
/* Try to get it loaded right near ld.so/glibc. */
vdso_base = STACK_TOP;
vdso_base = get_unmapped_area(NULL, vdso_base, PAGE_SIZE, 0, 0);
if (IS_ERR_VALUE(vdso_base)) {
ret = vdso_base;
goto up_fail;
}
/* MAYWRITE to allow gdb to COW and set breakpoints. */
ret = install_special_mapping(mm, vdso_base, PAGE_SIZE,
VM_READ|VM_EXEC|
VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
VM_ALWAYSDUMP,
&vdso_page);
if (ret)
goto up_fail;
mm->context.vdso = (void *)vdso_base;
up_fail:
up_write(&mm->mmap_sem);
return ret;
}
const char *arch_vma_name(struct vm_area_struct *vma)
{
if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
return "[vdso]";
return NULL;
}
/*
* Event entry/exit for Hexagon
*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#include <asm/asm-offsets.h> /* assembly-safer versions of C defines */
#include <asm/mem-layout.h> /* sigh, except for page_offset */
#include <asm/hexagon_vm.h>
#include <asm/thread_info.h>
/*
* Entry into guest-mode Linux under Hexagon Virtual Machine.
* Stack pointer points to event record - build pt_regs on top of it,
* set up a plausible C stack frame, and dispatch to the C handler.
* On return, do vmrte virtual instruction with SP where we started.
*
* VM Spec 0.5 uses a trap to fetch HVM record now.
*/
/*
* Save full register state, while setting up thread_info struct
* pointer derived from kernel stack pointer in THREADINFO_REG
* register, putting prior thread_info.regs pointer in a callee-save
* register (R24, which had better not ever be assigned to THREADINFO_REG),
* and updating thread_info.regs to point to current stack frame,
* so as to support nested events in kernel mode.
*
* As this is common code, we set the pt_regs system call number
* to -1 for all events. It will be replaced with the system call
* number in the case where we decode a system call (trap0(#1)).
*/
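/*
 * For illustration: the thread_info pointer is recovered by masking
 * the low bits of the event-record/pt_regs address with the negated
 * thread size, as done in the R2 computations below.  E.g., assuming
 * a 16KB _THREAD_SIZE (illustrative value only):
 *
 *	0xc0123f80 & ~(0x4000 - 1) == 0xc0120000
 */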
#define save_pt_regs()\
memd(R0 + #_PT_R3130) = R31:30; \
{ memw(R0 + #_PT_R2928) = R28; \
R31 = memw(R0 + #_PT_ER_VMPSP); }\
{ memw(R0 + #(_PT_R2928 + 4)) = R31; \
R31 = ugp; } \
{ memd(R0 + #_PT_R2726) = R27:26; \
R30 = gp ; } \
memd(R0 + #_PT_R2524) = R25:24; \
memd(R0 + #_PT_R2322) = R23:22; \
memd(R0 + #_PT_R2120) = R21:20; \
memd(R0 + #_PT_R1918) = R19:18; \
memd(R0 + #_PT_R1716) = R17:16; \
memd(R0 + #_PT_R1514) = R15:14; \
memd(R0 + #_PT_R1312) = R13:12; \
{ memd(R0 + #_PT_R1110) = R11:10; \
R15 = lc0; } \
{ memd(R0 + #_PT_R0908) = R9:8; \
R14 = sa0; } \
{ memd(R0 + #_PT_R0706) = R7:6; \
R13 = lc1; } \
{ memd(R0 + #_PT_R0504) = R5:4; \
R12 = sa1; } \
{ memd(R0 + #_PT_UGPGP) = R31:30; \
R11 = m1; \
R2.H = #HI(_THREAD_SIZE); } \
{ memd(R0 + #_PT_LC0SA0) = R15:14; \
R10 = m0; \
R2.L = #LO(_THREAD_SIZE); } \
{ memd(R0 + #_PT_LC1SA1) = R13:12; \
R15 = p3:0; \
R2 = neg(R2); } \
{ memd(R0 + #_PT_M1M0) = R11:10; \
R14 = usr; \
R2 = and(R0,R2); } \
{ memd(R0 + #_PT_PREDSUSR) = R15:14; \
THREADINFO_REG = R2; } \
{ r24 = memw(THREADINFO_REG + #_THREAD_INFO_PT_REGS); \
memw(THREADINFO_REG + #_THREAD_INFO_PT_REGS) = R0; \
R2 = #-1; } \
{ memw(R0 + #_PT_SYSCALL_NR) = R2; \
R30 = #0; }
/*
* Restore registers and thread_info.regs state. THREADINFO_REG
* is assumed to still be sane, and R24 to have been correctly
* preserved. Don't restore R29 (SP) until later.
*/
#define restore_pt_regs() \
{ memw(THREADINFO_REG + #_THREAD_INFO_PT_REGS) = R24; \
R15:14 = memd(R0 + #_PT_PREDSUSR); } \
{ R11:10 = memd(R0 + #_PT_M1M0); \
p3:0 = R15; } \
{ R13:12 = memd(R0 + #_PT_LC1SA1); \
usr = R14; } \
{ R15:14 = memd(R0 + #_PT_LC0SA0); \
m1 = R11; } \
{ R3:2 = memd(R0 + #_PT_R0302); \
m0 = R10; } \
{ R5:4 = memd(R0 + #_PT_R0504); \
lc1 = R13; } \
{ R7:6 = memd(R0 + #_PT_R0706); \
sa1 = R12; } \
{ R9:8 = memd(R0 + #_PT_R0908); \
lc0 = R15; } \
{ R11:10 = memd(R0 + #_PT_R1110); \
sa0 = R14; } \
{ R13:12 = memd(R0 + #_PT_R1312); \
R15:14 = memd(R0 + #_PT_R1514); } \
{ R17:16 = memd(R0 + #_PT_R1716); \
R19:18 = memd(R0 + #_PT_R1918); } \
{ R21:20 = memd(R0 + #_PT_R2120); \
R23:22 = memd(R0 + #_PT_R2322); } \
{ R25:24 = memd(R0 + #_PT_R2524); \
R27:26 = memd(R0 + #_PT_R2726); } \
R31:30 = memd(R0 + #_PT_UGPGP); \
{ R28 = memw(R0 + #_PT_R2928); \
ugp = R31; } \
{ R31:30 = memd(R0 + #_PT_R3130); \
gp = R30; }
/*
* Clears off enough space for the rest of pt_regs; evrec is a part
* of pt_regs in HVM mode. Save R0/R1, set handler's address in R1.
* R0 is the address of pt_regs and is the parameter to save_pt_regs.
*/
/*
* Since the HVM isn't automagically pushing the EVREC onto the stack anymore,
* we'll subtract the entire size out and then fill it in ourselves.
* Need to save off R0, R1, R2, R3 immediately.
*/
#define vm_event_entry(CHandler) \
{ \
R29 = add(R29, #-(_PT_REGS_SIZE)); \
memd(R29 + #(_PT_R0100 + -_PT_REGS_SIZE)) = R1:0; \
} \
{ \
memd(R29 +#_PT_R0302) = R3:2; \
} \
trap1(#HVM_TRAP1_VMGETREGS); \
{ \
memd(R29 + #_PT_ER_VMEL) = R1:0; \
R0 = R29; \
R1.L = #LO(CHandler); \
} \
{ \
memd(R29 + #_PT_ER_VMPSP) = R3:2; \
R1.H = #HI(CHandler); \
jump event_dispatch; \
}
.text
/*
* Do bulk save/restore in one place.
* Adds a jump to dispatch latency, but
* saves hundreds of bytes.
*/
event_dispatch:
save_pt_regs()
callr r1
/*
* If we were in kernel mode, we don't need to check scheduler
* or signals if CONFIG_PREEMPT is not set. If set, then it has
* to jump to a need_resched kind of block.
* BTW, CONFIG_PREEMPT is not supported yet.
*/
#ifdef CONFIG_PREEMPT
R0 = #VM_INT_DISABLE
trap1(#HVM_TRAP1_VMSETIE)
#endif
/* "Nested control path" -- if the previous mode was kernel */
R0 = memw(R29 + #_PT_ER_VMEST);
P0 = tstbit(R0, #HVM_VMEST_UM_SFT);
if !P0 jump restore_all;
/*
* Returning from system call, normally coming back from user mode
*/
return_from_syscall:
/* Disable interrupts while checking TIF */
R0 = #VM_INT_DISABLE
trap1(#HVM_TRAP1_VMSETIE)
/*
* Coming back from the C-world, our thread info pointer
* should be in the designated register (usually R19)
*/
R1.L = #LO(_TIF_ALLWORK_MASK)
{
R1.H = #HI(_TIF_ALLWORK_MASK);
R0 = memw(THREADINFO_REG + #_THREAD_INFO_FLAGS);
}
/*
* Compare against the "return to userspace" _TIF_WORK_MASK
*/
R1 = and(R1,R0);
{ P0 = cmp.eq(R1,#0); if (!P0.new) jump:t work_pending;}
jump restore_all; /* we're outta here! */
work_pending:
{
P0 = tstbit(R1, #TIF_NEED_RESCHED);
if (!P0.new) jump:nt work_notifysig;
}
call schedule
jump return_from_syscall; /* check for more work */
work_notifysig:
/* this is the part that's kind of fuzzy. */
R1 = and(R0, #(_TIF_SIGPENDING | _TIF_NOTIFY_RESUME));
P0 = cmp.eq(R1, #0);
if P0 jump restore_all
R1 = R0; /* unsigned long thread_info_flags */
R0 = R29; /* regs should still be at top of stack */
call do_notify_resume
restore_all:
/* Disable interrupts, if they weren't already, before reg restore. */
R0 = #VM_INT_DISABLE
trap1(#HVM_TRAP1_VMSETIE)
/* do the setregs here for VM 0.5 */
/* R29 here should already be pointing at pt_regs */
R1:0 = memd(R29 + #_PT_ER_VMEL);
R3:2 = memd(R29 + #_PT_ER_VMPSP);
trap1(#HVM_TRAP1_VMSETREGS);
R0 = R29
restore_pt_regs()
R1:0 = memd(R29 + #_PT_R0100);
R29 = add(R29, #_PT_REGS_SIZE);
trap1(#HVM_TRAP1_VMRTE)
/* Notreached */
.globl _K_enter_genex
_K_enter_genex:
vm_event_entry(do_genex)
.globl _K_enter_interrupt
_K_enter_interrupt:
vm_event_entry(arch_do_IRQ)
.globl _K_enter_trap0
_K_enter_trap0:
vm_event_entry(do_trap0)
.globl _K_enter_machcheck
_K_enter_machcheck:
vm_event_entry(do_machcheck)
.globl ret_from_fork
ret_from_fork:
call schedule_tail
jump return_from_syscall
/*
* Mostly IRQ support for Hexagon
*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#include <linux/kernel.h>
#include <asm/registers.h>
#include <linux/irq.h>
#include <linux/hardirq.h>
#include <asm/system.h>
/*
* show_regs - print pt_regs structure
* @regs: pointer to pt_regs
*
* To-do: add all the accessor definitions to registers.h;
* that will make this routine a lot easier to write.
*/
void show_regs(struct pt_regs *regs)
{
printk(KERN_EMERG "restart_r0: \t0x%08lx syscall_nr: %ld\n",
regs->restart_r0, regs->syscall_nr);
printk(KERN_EMERG "preds: \t\t0x%08lx\n", regs->preds);
printk(KERN_EMERG "lc0: \t0x%08lx sa0: 0x%08lx m0: 0x%08lx\n",
regs->lc0, regs->sa0, regs->m0);
printk(KERN_EMERG "lc1: \t0x%08lx sa1: 0x%08lx m1: 0x%08lx\n",
regs->lc1, regs->sa1, regs->m1);
printk(KERN_EMERG "gp: \t0x%08lx ugp: 0x%08lx usr: 0x%08lx\n",
regs->gp, regs->ugp, regs->usr);
printk(KERN_EMERG "r0: \t0x%08lx %08lx %08lx %08lx\n", regs->r00,
regs->r01,
regs->r02,
regs->r03);
printk(KERN_EMERG "r4: \t0x%08lx %08lx %08lx %08lx\n", regs->r04,
regs->r05,
regs->r06,
regs->r07);
printk(KERN_EMERG "r8: \t0x%08lx %08lx %08lx %08lx\n", regs->r08,
regs->r09,
regs->r10,
regs->r11);
printk(KERN_EMERG "r12: \t0x%08lx %08lx %08lx %08lx\n", regs->r12,
regs->r13,
regs->r14,
regs->r15);
printk(KERN_EMERG "r16: \t0x%08lx %08lx %08lx %08lx\n", regs->r16,
regs->r17,
regs->r18,
regs->r19);
printk(KERN_EMERG "r20: \t0x%08lx %08lx %08lx %08lx\n", regs->r20,
regs->r21,
regs->r22,
regs->r23);
printk(KERN_EMERG "r24: \t0x%08lx %08lx %08lx %08lx\n", regs->r24,
regs->r25,
regs->r26,
regs->r27);
printk(KERN_EMERG "r28: \t0x%08lx %08lx %08lx %08lx\n", regs->r28,
regs->r29,
regs->r30,
regs->r31);
printk(KERN_EMERG "elr: \t0x%08lx cause: 0x%08lx user_mode: %d\n",
pt_elr(regs), pt_cause(regs), user_mode(regs));
printk(KERN_EMERG "psp: \t0x%08lx badva: 0x%08lx int_enabled: %d\n",
pt_psp(regs), pt_badva(regs), ints_enabled(regs));
}
void dummy_handler(struct pt_regs *regs)
{
unsigned int elr = pt_elr(regs);
printk(KERN_ERR "Unimplemented handler; ELR=0x%08x\n", elr);
}
void arch_do_IRQ(struct pt_regs *regs)
{
int irq = pt_cause(regs);
struct pt_regs *old_regs = set_irq_regs(regs);
irq_enter();
generic_handle_irq(irq);
irq_exit();
set_irq_regs(old_regs);
}
/*
* Initial page table for Linux kernel under Hexagon VM,
*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
/*
* These tables are pre-computed and linked into kernel.
*/
#include <asm/vm_mmu.h>
/* #include <asm/iomap.h> */
/*
* Start with mapping PA=0 to both VA=0x0 and VA=0xc0000000 as 16MB large pages.
* No user mode access, RWX, write-back cache. The entry needs
* to be replicated for all 4 virtual segments mapping to the page.
*/
/* "Big Kernel Page" */
#define BKP(pa) (((pa) & __HVM_PTE_PGMASK_4MB) \
| __HVM_PTE_R | __HVM_PTE_W | __HVM_PTE_X \
| __HEXAGON_C_WB_L2 << 6 \
| __HVM_PDE_S_16MB)
/* No cache version */
#define BKPG_IO(pa) (((pa) & __HVM_PTE_PGMASK_16MB) \
| __HVM_PTE_R | __HVM_PTE_W | __HVM_PTE_X \
| __HVM_PDE_S_16MB | __HEXAGON_C_DEV << 6 )
#define FOURK_IO(pa) (((pa) & __HVM_PTE_PGMASK_4KB) \
| __HVM_PTE_R | __HVM_PTE_W | __HVM_PTE_X \
| __HEXAGON_C_DEV << 6 )
#define L2_PTR(pa) (((pa) & __HVM_PTE_PGMASK_4KB) \
| __HVM_PDE_S_4KB )
#define X __HVM_PDE_S_INVALID
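/*
 * Layout note (a sketch, inferred from the table below): each .word
 * is one 4MB segment-table slot, so 256 slots cover 1GB of virtual
 * space, and a 16MB page occupies four consecutive slots, per the
 * replication requirement noted above.  E.g., the first kernel row
 * maps VA 0xC0000000..0xC0FFFFFF onto the 16MB page at PA 0:
 *
 *	.word BKP(0x00000000), BKP(0x00400000), BKP(0x00800000), BKP(0x00c00000)
 */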
.p2align 12
.globl swapper_pg_dir
.globl _K_init_segtable
swapper_pg_dir:
/* VA 0x00000000 */
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
.word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
.word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
.word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
.word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
.word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
.word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
.word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
/* VA 0x40000000 */
.word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
.word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
.word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
.word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
.word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
.word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
.word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
.word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
/* VA 0x80000000 */
.word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
.word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
.word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
.word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
.word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
/*0xa8*/.word X,X,X,X
#ifdef CONFIG_COMET_EARLY_UART_DEBUG
UART_PTE_ENTRY:
/*0xa9*/.word BKPG_IO(0xa9000000),BKPG_IO(0xa9000000),BKPG_IO(0xa9000000),BKPG_IO(0xa9000000)
#else
/*0xa9*/.word X,X,X,X
#endif
/*0xaa*/.word X,X,X,X
/*0xab*/.word X,X,X,X
/*0xac*/.word X,X,X,X
/*0xad*/.word X,X,X,X
/*0xae*/.word X,X,X,X
/*0xaf*/.word X,X,X,X
/*0xb0*/.word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
.word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
_K_init_segtable:
/* VA 0xC0000000 */
.word BKP(0x00000000), BKP(0x00400000), BKP(0x00800000), BKP(0x00c00000)
.word BKP(0x01000000), BKP(0x01400000), BKP(0x01800000), BKP(0x01c00000)
.word BKP(0x02000000), BKP(0x02400000), BKP(0x02800000), BKP(0x02c00000)
.word BKP(0x03000000), BKP(0x03400000), BKP(0x03800000), BKP(0x03c00000)
.word BKP(0x04000000), BKP(0x04400000), BKP(0x04800000), BKP(0x04c00000)
.word BKP(0x05000000), BKP(0x05400000), BKP(0x05800000), BKP(0x05c00000)
.word BKP(0x06000000), BKP(0x06400000), BKP(0x06800000), BKP(0x06c00000)
.word BKP(0x07000000), BKP(0x07400000), BKP(0x07800000), BKP(0x07c00000)
.word BKP(0x08000000), BKP(0x08400000), BKP(0x08800000), BKP(0x08c00000)
.word BKP(0x09000000), BKP(0x09400000), BKP(0x09800000), BKP(0x09c00000)
.word BKP(0x0a000000), BKP(0x0a400000), BKP(0x0a800000), BKP(0x0ac00000)
.word BKP(0x0b000000), BKP(0x0b400000), BKP(0x0b800000), BKP(0x0bc00000)
.word BKP(0x0c000000), BKP(0x0c400000), BKP(0x0c800000), BKP(0x0cc00000)
.word BKP(0x0d000000), BKP(0x0d400000), BKP(0x0d800000), BKP(0x0dc00000)
.word BKP(0x0e000000), BKP(0x0e400000), BKP(0x0e800000), BKP(0x0ec00000)
.word BKP(0x0f000000), BKP(0x0f400000), BKP(0x0f800000), BKP(0x0fc00000)
.word BKP(0x10000000), BKP(0x10400000), BKP(0x10800000), BKP(0x10c00000)
.word BKP(0x11000000), BKP(0x11400000), BKP(0x11800000), BKP(0x11c00000)
.word BKP(0x12000000), BKP(0x12400000), BKP(0x12800000), BKP(0x12c00000)
.word BKP(0x13000000), BKP(0x13400000), BKP(0x13800000), BKP(0x13c00000)
.word BKP(0x14000000), BKP(0x14400000), BKP(0x14800000), BKP(0x14c00000)
.word BKP(0x15000000), BKP(0x15400000), BKP(0x15800000), BKP(0x15c00000)
.word BKP(0x16000000), BKP(0x16400000), BKP(0x16800000), BKP(0x16c00000)
.word BKP(0x17000000), BKP(0x17400000), BKP(0x17800000), BKP(0x17c00000)
.word BKP(0x18000000), BKP(0x18400000), BKP(0x18800000), BKP(0x18c00000)
.word BKP(0x19000000), BKP(0x19400000), BKP(0x19800000), BKP(0x19c00000)
.word BKP(0x1a000000), BKP(0x1a400000), BKP(0x1a800000), BKP(0x1ac00000)
.word BKP(0x1b000000), BKP(0x1b400000), BKP(0x1b800000), BKP(0x1bc00000)
.word BKP(0x1c000000), BKP(0x1c400000), BKP(0x1c800000), BKP(0x1cc00000)
.word BKP(0x1d000000), BKP(0x1d400000), BKP(0x1d800000), BKP(0x1dc00000)
.word BKP(0x1e000000), BKP(0x1e400000), BKP(0x1e800000), BKP(0x1ec00000)
.word BKP(0x1f000000), BKP(0x1f400000), BKP(0x1f800000), BKP(0x1fc00000)
.word BKP(0x20000000), BKP(0x20400000), BKP(0x20800000), BKP(0x20c00000)
.word BKP(0x21000000), BKP(0x21400000), BKP(0x21800000), BKP(0x21c00000)
.word BKP(0x22000000), BKP(0x22400000), BKP(0x22800000), BKP(0x22c00000)
.word BKP(0x23000000), BKP(0x23400000), BKP(0x23800000), BKP(0x23c00000)
.word BKP(0x24000000), BKP(0x24400000), BKP(0x24800000), BKP(0x24c00000)
.word BKP(0x25000000), BKP(0x25400000), BKP(0x25800000), BKP(0x25c00000)
.word BKP(0x26000000), BKP(0x26400000), BKP(0x26800000), BKP(0x26c00000)
.word BKP(0x27000000), BKP(0x27400000), BKP(0x27800000), BKP(0x27c00000)
.word BKP(0x28000000), BKP(0x28400000), BKP(0x28800000), BKP(0x28c00000)
.word BKP(0x29000000), BKP(0x29400000), BKP(0x29800000), BKP(0x29c00000)
.word BKP(0x2a000000), BKP(0x2a400000), BKP(0x2a800000), BKP(0x2ac00000)
.word BKP(0x2b000000), BKP(0x2b400000), BKP(0x2b800000), BKP(0x2bc00000)
.word BKP(0x2c000000), BKP(0x2c400000), BKP(0x2c800000), BKP(0x2cc00000)
.word BKP(0x2d000000), BKP(0x2d400000), BKP(0x2d800000), BKP(0x2dc00000)
.word BKP(0x2e000000), BKP(0x2e400000), BKP(0x2e800000), BKP(0x2ec00000)
.word BKP(0x2f000000), BKP(0x2f400000), BKP(0x2f800000), BKP(0x2fc00000)
.word BKP(0x30000000), BKP(0x30400000), BKP(0x30800000), BKP(0x30c00000)
.word BKP(0x31000000), BKP(0x31400000), BKP(0x31800000), BKP(0x31c00000)
.word BKP(0x32000000), BKP(0x32400000), BKP(0x32800000), BKP(0x32c00000)
.word BKP(0x33000000), BKP(0x33400000), BKP(0x33800000), BKP(0x33c00000)
.word BKP(0x34000000), BKP(0x34400000), BKP(0x34800000), BKP(0x34c00000)
.word BKP(0x35000000), BKP(0x35400000), BKP(0x35800000), BKP(0x35c00000)
.word BKP(0x36000000), BKP(0x36400000), BKP(0x36800000), BKP(0x36c00000)
.word BKP(0x37000000), BKP(0x37400000), BKP(0x37800000), BKP(0x37c00000)
.word BKP(0x38000000), BKP(0x38400000), BKP(0x38800000), BKP(0x38c00000)
.word BKP(0x39000000), BKP(0x39400000), BKP(0x39800000), BKP(0x39c00000)
.word BKP(0x3a000000), BKP(0x3a400000), BKP(0x3a800000), BKP(0x3ac00000)
.word BKP(0x3b000000), BKP(0x3b400000), BKP(0x3b800000), BKP(0x3bc00000)
.word BKP(0x3c000000), BKP(0x3c400000), BKP(0x3c800000), BKP(0x3cc00000)
.word BKP(0x3d000000), BKP(0x3d400000), BKP(0x3d800000), BKP(0x3dc00000)
_K_io_map:
.word X,X,X,X /* 0x3e000000 - device IO early remap */
.word X,X,X,X /* 0x3f000000 - hypervisor space*/
#if 0
/*
* This is in here as an example for devices which need to be mapped really
* early.
*/
.p2align 12
.globl _K_io_kmap
.globl _K_init_devicetable
_K_init_devicetable: /* Should be 4MB worth of entries */
.word FOURK_IO(MSM_GPIO1_PHYS),FOURK_IO(MSM_GPIO2_PHYS),FOURK_IO(MSM_SIRC_PHYS),X
.word FOURK_IO(TLMM_GPIO1_PHYS),X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
#endif
/*
* Hexagon VM instruction support
*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#include <linux/linkage.h>
#include <asm/hexagon_vm.h>
/*
* C wrappers for virtual machine "instructions". These
* could be, and perhaps some day will be, handled as in-line
* macros, but for tracing/debugging it's handy to have
* a single point of invocation for each of them.
* Conveniently, they take parameters and return values
* consistent with the ABI calling convention.
*/
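/*
 * Typical usage from C is a direct call; for example, the trap0
 * handler re-enables interrupts for syscall processing with
 *
 *	__vmsetie(VM_INT_ENABLE);
 *
 * and do_machcheck() halts the virtual machine via __vmstop().
 */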
ENTRY(__vmrte)
trap1(#HVM_TRAP1_VMRTE);
jumpr R31;
ENTRY(__vmsetvec)
trap1(#HVM_TRAP1_VMSETVEC);
jumpr R31;
ENTRY(__vmsetie)
trap1(#HVM_TRAP1_VMSETIE);
jumpr R31;
ENTRY(__vmgetie)
trap1(#HVM_TRAP1_VMGETIE);
jumpr R31;
ENTRY(__vmintop)
trap1(#HVM_TRAP1_VMINTOP);
jumpr R31;
ENTRY(__vmclrmap)
trap1(#HVM_TRAP1_VMCLRMAP);
jumpr R31;
ENTRY(__vmnewmap)
r1 = #VM_NEWMAP_TYPE_PGTABLES;
trap1(#HVM_TRAP1_VMNEWMAP);
jumpr R31;
ENTRY(__vmcache)
trap1(#HVM_TRAP1_VMCACHE);
jumpr R31;
ENTRY(__vmgettime)
trap1(#HVM_TRAP1_VMGETTIME);
jumpr R31;
ENTRY(__vmsettime)
trap1(#HVM_TRAP1_VMSETTIME);
jumpr R31;
ENTRY(__vmwait)
trap1(#HVM_TRAP1_VMWAIT);
jumpr R31;
ENTRY(__vmyield)
trap1(#HVM_TRAP1_VMYIELD);
jumpr R31;
ENTRY(__vmstart)
trap1(#HVM_TRAP1_VMSTART);
jumpr R31;
ENTRY(__vmstop)
trap1(#HVM_TRAP1_VMSTOP);
jumpr R31;
ENTRY(__vmvpid)
trap1(#HVM_TRAP1_VMVPID);
jumpr R31;
/* Probably not actually going to use these; see vm_entry.S */
ENTRY(__vmsetregs)
trap1(#HVM_TRAP1_VMSETREGS);
jumpr R31;
ENTRY(__vmgetregs)
trap1(#HVM_TRAP1_VMGETREGS);
jumpr R31;
/*
* Context switch support for Hexagon
*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#include <asm/asm-offsets.h>
.text
/*
* The register used as a fast-path thread information pointer
* is determined as a kernel configuration option. If it happens
* to be a callee-save register, we're going to be saving and
* restoring it twice here.
*
* This code anticipates a revised ABI where R20-23 are added
* to the set of callee-save registers, but this should be
* backward compatible to legacy tools.
*/
/*
* void switch_to(struct task_struct *prev,
* struct task_struct *next, struct task_struct *last);
*/
.p2align 2
.globl __switch_to
.type __switch_to, @function
/*
* When we exit the wormhole, we need to store the previous task
* in the new R0's pointer. Technically it should be R2, but they should
* be the same; seems like a legacy thing. In short, don't butcher
* R0, let it go back out unmolested.
*/
__switch_to:
/*
* Push callee-saves onto "prev" stack.
* Here, we're sneaky because the LR and FP
* storage of the thread_stack structure
* is automagically allocated by allocframe,
* so we pass struct size less 8.
*/
allocframe(#(_SWITCH_STACK_SIZE - 8));
memd(R29+#(_SWITCH_R2726))=R27:26;
memd(R29+#(_SWITCH_R2524))=R25:24;
memd(R29+#(_SWITCH_R2322))=R23:22;
memd(R29+#(_SWITCH_R2120))=R21:20;
memd(R29+#(_SWITCH_R1918))=R19:18;
memd(R29+#(_SWITCH_R1716))=R17:16;
/* Stash thread_info pointer in task_struct */
memw(R0+#_TASK_THREAD_INFO) = THREADINFO_REG;
memw(R0 +#(_TASK_STRUCT_THREAD + _THREAD_STRUCT_SWITCH_SP)) = R29;
/* Switch to "next" stack and restore callee saves from there */
R29 = memw(R1 + #(_TASK_STRUCT_THREAD + _THREAD_STRUCT_SWITCH_SP));
{
R27:26 = memd(R29+#(_SWITCH_R2726));
R25:24 = memd(R29+#(_SWITCH_R2524));
}
{
R23:22 = memd(R29+#(_SWITCH_R2322));
R21:20 = memd(R29+#(_SWITCH_R2120));
}
{
R19:18 = memd(R29+#(_SWITCH_R1918));
R17:16 = memd(R29+#(_SWITCH_R1716));
}
{
/* THREADINFO_REG is currently one of the callee-saved regs
* above, and so be sure to re-load it last.
*/
THREADINFO_REG = memw(R1 + #_TASK_THREAD_INFO);
R31:30 = memd(R29+#_SWITCH_FP);
}
{
R29 = add(R29,#_SWITCH_STACK_SIZE);
jumpr R31;
}
.size __switch_to, .-__switch_to
/*
* Event jump tables
*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#include <asm/hexagon_vm.h>
.text
/* This is registered early on to allow Angel calls */
.global _K_provisional_vec
_K_provisional_vec:
jump 1f;
jump 1f;
jump 1f;
jump 1f;
jump 1f;
trap1(#HVM_TRAP1_VMRTE)
jump 1f;
jump 1f;
.global _K_VM_event_vector
_K_VM_event_vector:
1:
jump 1b; /* Reset */
jump _K_enter_machcheck;
jump _K_enter_genex;
jump 1b; /* 3 Rsvd */
jump 1b; /* 4 Rsvd */
jump _K_enter_trap0;
jump 1b; /* 6 Rsvd */
jump _K_enter_interrupt;
/*
* Linker script for Hexagon kernel
*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#define LOAD_OFFSET PAGE_OFFSET
#include <asm-generic/vmlinux.lds.h>
#include <asm/asm-offsets.h> /* Most of the kernel defines are here */
#include <asm/mem-layout.h> /* except for page_offset */
#include <asm/cache.h> /* and now we're pulling cache line size */
OUTPUT_ARCH(hexagon)
ENTRY(stext)
jiffies = jiffies_64;
/*
See asm-generic/vmlinux.lds.h for expansion of some of these macros.
See asm-generic/sections.h for seemingly required labels.
*/
#define PAGE_SIZE _PAGE_SIZE
/* This LOAD_OFFSET is temporary for debugging on the simulator; it may change
for hypervisor pseudo-physical memory. */
SECTIONS
{
. = PAGE_OFFSET + LOAD_ADDRESS;
__init_begin = .;
HEAD_TEXT_SECTION
INIT_TEXT_SECTION(PAGE_SIZE)
PERCPU_SECTION(L1_CACHE_BYTES)
__init_end = .;
. = ALIGN(_PAGE_SIZE);
_stext = .;
.text : AT(ADDR(.text) - LOAD_OFFSET) {
_text = .;
TEXT_TEXT
SCHED_TEXT
LOCK_TEXT
KPROBES_TEXT
*(.fixup)
}
_etext = .;
INIT_DATA_SECTION(PAGE_SIZE)
_sdata = .;
RW_DATA_SECTION(32,PAGE_SIZE,PAGE_SIZE)
RO_DATA_SECTION(PAGE_SIZE)
_edata = .;
EXCEPTION_TABLE(16)
NOTES
BSS_SECTION(_PAGE_SIZE, _PAGE_SIZE, _PAGE_SIZE)
_end = .;
/DISCARD/ : {
EXIT_TEXT
EXIT_DATA
EXIT_CALL
}
STABS_DEBUG
DWARF_DEBUG
}
#
# Makefile for hexagon-specific library files.
#
obj-y = checksum.o io.o memcpy.o memset.o
/*
* Checksum functions for Hexagon
*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
/* This was derived from arch/alpha/lib/checksum.c */
#include <linux/module.h>
#include <linux/string.h>
#include <asm/byteorder.h>
#include <net/checksum.h>
#include <linux/uaccess.h>
#include <asm/intrinsics.h>
/* Vector value operations */
#define SIGN(x, y) ((0x8000ULL*x)<<y)
#define CARRY(x, y) ((0x0002ULL*x)<<y)
#define SELECT(x, y) ((0x0001ULL*x)<<y)
#define VR_NEGATE(a, b, c, d) (SIGN(a, 48) + SIGN(b, 32) + SIGN(c, 16) \
+ SIGN(d, 0))
#define VR_CARRY(a, b, c, d) (CARRY(a, 48) + CARRY(b, 32) + CARRY(c, 16) \
+ CARRY(d, 0))
#define VR_SELECT(a, b, c, d) (SELECT(a, 48) + SELECT(b, 32) + SELECT(c, 16) \
+ SELECT(d, 0))
/* optimized HEXAGON V3 intrinsic version */
static inline unsigned short from64to16(u64 x)
{
u64 sum;
sum = HEXAGON_P_vrmpyh_PP(x^VR_NEGATE(1, 1, 1, 1),
VR_SELECT(1, 1, 1, 1));
sum += VR_CARRY(0, 0, 1, 0);
sum = HEXAGON_P_vrmpyh_PP(sum, VR_SELECT(0, 0, 1, 1));
return 0xFFFF & sum;
}
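/*
 * How the intrinsic trick above works (a sketch): vrmpyh multiplies
 * corresponding *signed* halfwords and sums the four products.
 * XORing x with VR_NEGATE(1, 1, 1, 1) flips each halfword's sign bit,
 * i.e. replaces each unsigned halfword u with (u - 0x8000); the
 * multiply by VR_SELECT(1, 1, 1, 1) then yields sum(u_i) - 4*0x8000.
 * Adding VR_CARRY(0, 0, 1, 0) == 0x20000 == 4*0x8000 restores the
 * true halfword sum, and the second vrmpyh folds that sum's two low
 * halfwords together ahead of the final 16-bit mask.
 */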
/*
* computes the checksum of the TCP/UDP pseudo-header
* returns a 16-bit checksum, already complemented.
*/
__sum16 csum_tcpudp_magic(unsigned long saddr, unsigned long daddr,
unsigned short len, unsigned short proto,
__wsum sum)
{
return (__force __sum16)~from64to16(
(__force u64)saddr + (__force u64)daddr +
(__force u64)sum + ((len + proto) << 8));
}
__wsum csum_tcpudp_nofold(unsigned long saddr, unsigned long daddr,
unsigned short len, unsigned short proto,
__wsum sum)
{
u64 result;
result = (__force u64)saddr + (__force u64)daddr +
(__force u64)sum + ((len + proto) << 8);
/* Fold down to 32-bits so we don't lose in the typedef-less
network stack. */
/* 64 to 33 */
result = (result & 0xffffffffUL) + (result >> 32);
/* 33 to 32 */
result = (result & 0xffffffffUL) + (result >> 32);
return (__force __wsum)result;
}
EXPORT_SYMBOL(csum_tcpudp_nofold);
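/*
 * Worked example of the two folds above: result == 0x1ffffffff
 * gives 0xffffffff + 0x1 == 0x100000000 after the first fold, and
 * 0x00000000 + 0x1 == 0x1 after the second; a single fold could
 * leave a carry behind.
 */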
/*
* Do a 64-bit checksum on an arbitrary memory area..
*
* This isn't a great routine, but it's not _horrible_ either. The
* inner loop could be unrolled a bit further, and there are better
* ways to do the carry, but this is reasonable.
*/
/* optimized HEXAGON intrinsic version, with over-read fixed */
unsigned int do_csum(const void *voidptr, int len)
{
u64 sum0, sum1, x0, x1, *ptr8_o, *ptr8_e, *ptr8;
int i, start, mid, end, mask;
const char *ptr = voidptr;
unsigned short *ptr2;
unsigned int *ptr4;
if (len <= 0)
return 0;
start = 0xF & (16 - (((int) ptr) & 0xF));
mask = 0x7fffffffUL >> HEXAGON_R_cl0_R(len);
start = start & mask;
mid = len - start;
end = mid & 0xF;
mid = mid>>4;
sum0 = mid << 18;
sum1 = 0;
if (start & 1)
sum0 += (u64) (ptr[0] << 8);
ptr2 = (unsigned short *) &ptr[start & 1];
if (start & 2)
sum1 += (u64) ptr2[0];
ptr4 = (unsigned int *) &ptr[start & 3];
if (start & 4) {
sum0 = HEXAGON_P_vrmpyhacc_PP(sum0,
VR_NEGATE(0, 0, 1, 1)^((u64)ptr4[0]),
VR_SELECT(0, 0, 1, 1));
sum0 += VR_SELECT(0, 0, 1, 0);
}
ptr8 = (u64 *) &ptr[start & 7];
if (start & 8) {
sum1 = HEXAGON_P_vrmpyhacc_PP(sum1,
VR_NEGATE(1, 1, 1, 1)^(ptr8[0]),
VR_SELECT(1, 1, 1, 1));
sum1 += VR_CARRY(0, 0, 1, 0);
}
ptr8_o = (u64 *) (ptr + start);
ptr8_e = (u64 *) (ptr + start + 8);
if (mid) {
x0 = *ptr8_e; ptr8_e += 2;
x1 = *ptr8_o; ptr8_o += 2;
if (mid > 1)
for (i = 0; i < mid-1; i++) {
sum0 = HEXAGON_P_vrmpyhacc_PP(sum0,
x0^VR_NEGATE(1, 1, 1, 1),
VR_SELECT(1, 1, 1, 1));
sum1 = HEXAGON_P_vrmpyhacc_PP(sum1,
x1^VR_NEGATE(1, 1, 1, 1),
VR_SELECT(1, 1, 1, 1));
x0 = *ptr8_e; ptr8_e += 2;
x1 = *ptr8_o; ptr8_o += 2;
}
sum0 = HEXAGON_P_vrmpyhacc_PP(sum0, x0^VR_NEGATE(1, 1, 1, 1),
VR_SELECT(1, 1, 1, 1));
sum1 = HEXAGON_P_vrmpyhacc_PP(sum1, x1^VR_NEGATE(1, 1, 1, 1),
VR_SELECT(1, 1, 1, 1));
}
ptr4 = (unsigned int *) &ptr[start + (mid * 16) + (end & 8)];
if (end & 4) {
sum1 = HEXAGON_P_vrmpyhacc_PP(sum1,
VR_NEGATE(0, 0, 1, 1)^((u64)ptr4[0]),
VR_SELECT(0, 0, 1, 1));
sum1 += VR_SELECT(0, 0, 1, 0);
}
ptr2 = (unsigned short *) &ptr[start + (mid * 16) + (end & 12)];
if (end & 2)
sum0 += (u64) ptr2[0];
if (end & 1)
sum1 += (u64) ptr[start + (mid * 16) + (end & 14)];
ptr8 = (u64 *) &ptr[start + (mid * 16)];
if (end & 8) {
sum0 = HEXAGON_P_vrmpyhacc_PP(sum0,
VR_NEGATE(1, 1, 1, 1)^(ptr8[0]),
VR_SELECT(1, 1, 1, 1));
sum0 += VR_CARRY(0, 0, 1, 0);
}
sum0 = HEXAGON_P_vrmpyh_PP((sum0+sum1)^VR_NEGATE(0, 0, 0, 1),
VR_SELECT(0, 0, 1, 1));
sum0 += VR_NEGATE(0, 0, 0, 1);
sum0 = HEXAGON_P_vrmpyh_PP(sum0, VR_SELECT(0, 0, 1, 1));
if (start & 1)
sum0 = (sum0 << 8) | (0xFF & (sum0 >> 8));
return 0xFFFF & sum0;
}
/*
* copy from ds while checksumming, otherwise like csum_partial
*/
__wsum
csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
{
memcpy(dst, src, len);
return csum_partial(dst, len, sum);
}
/*
* I/O access functions for Hexagon
*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#include <asm/io.h>
/* These are all FIFO routines! */
/*
* __raw_readsw - read words a short at a time
* @addr: source address
* @data: data address
* @len: number of shorts to read
*/
void __raw_readsw(const void __iomem *addr, void *data, int len)
{
const volatile short int *src = (short int *) addr;
short int *dst = (short int *) data;
if ((u32)data & 0x1)
panic("unaligned pointer to readsw");
while (len-- > 0)
*dst++ = *src;
}
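/*
 * A usage sketch (names hypothetical): draining a 16-bit device FIFO
 * whose data register was previously ioremap()'d.  Note the source
 * address is re-read without being advanced, which is exactly the
 * FIFO behavior these routines provide:
 *
 *	u16 buf[64];
 *	__raw_readsw(fifo_base + DATA_REG_OFFSET, buf, 64);
 */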
/*
* __raw_writesw - write words a short at a time
* @addr: destination address
* @data: source data address
* @len: number of shorts to write
*/
void __raw_writesw(void __iomem *addr, const void *data, int len)
{
const short int *src = (short int *)data;
volatile short int *dst = (short int *)addr;
if ((u32)data & 0x1)
panic("unaligned pointer to writesw");
while (len-- > 0)
*dst = *src++;
}
/* Pretty sure len is pre-adjusted for the length of the access already */
void __raw_readsl(const void __iomem *addr, void *data, int len)
{
const volatile long *src = (long *) addr;
long *dst = (long *) data;
if ((u32)data & 0x3)
panic("unaligned pointer to readsl");
while (len-- > 0)
*dst++ = *src;
}
void __raw_writesl(void __iomem *addr, const void *data, int len)
{
const long *src = (long *)data;
volatile long *dst = (long *)addr;
if ((u32)data & 0x3)
panic("unaligned pointer to writesl");
while (len-- > 0)
*dst = *src++;
}
/*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
/*
* Description
*
* library function for memcpy where length bytes are copied from
* ptr_in to ptr_out. ptr_out is returned unchanged.
* Allows any combination of alignment on input and output pointers
* and length from 0 to 2^32-1
*
* Restrictions
* The arrays should not overlap; the program will produce undefined
* output if they do.
* For blocks of less than 16 bytes, a byte-by-byte copy is performed. For
* 8-byte alignments and length multiples, a dword copy is performed up to
* 96 bytes.
* History
*
* DJH 5/15/09 Initial version 1.0
* DJH 6/ 1/09 Version 1.1 modified ABI to include R16-R19
* DJH 7/12/09 Version 1.2 optimized codesize down to 760 was 840
* DJH 10/14/09 Version 1.3 added special loop for aligned case, was
* overreading bloated codesize back up to 892
* DJH 4/20/10 Version 1.4 fixed Ldword_loop_epilog loop to prevent loads
* occurring if only 1 left outstanding, fixes bug
* # 3888, corrected for all alignments. Peeled off
* 1 32byte chunk from kernel loop and extended 8byte
* loop at end to solve all combinations and prevent
* over read. Fixed Ldword_loop_prolog to prevent
* overread for blocks less than 48bytes. Reduced
* codesize to 752 bytes
* DJH 4/21/10 version 1.5 1.4 fix broke code for input block ends not
* aligned to dword boundaries, under-writing by 1
* byte, added detection for this and fixed. A
* little bloat.
* DJH 4/23/10 version 1.6 corrected stack error, R20 was not being restored
* always, fixed the error of R20 being modified
* before it was being saved
* Natural c model
* ===============
* void * memcpy(char * ptr_out, char * ptr_in, int length) {
* int i;
* if(length) for(i=0; i < length; i++) { ptr_out[i] = ptr_in[i]; }
* return(ptr_out);
* }
*
* Optimized memcpy function
* =========================
* void * memcpy(char * ptr_out, char * ptr_in, int len) {
* int i, prolog, kernel, epilog, mask;
* u8 offset;
* s64 data0, dataF8, data70;
*
* s64 * ptr8_in;
* s64 * ptr8_out;
* s32 * ptr4;
* s16 * ptr2;
*
* offset = ((int) ptr_in) & 7;
* ptr8_in = (s64 *) &ptr_in[-offset]; //read in the aligned pointers
*
* data70 = *ptr8_in++;
* dataF8 = *ptr8_in++;
*
* data0 = HEXAGON_P_valignb_PPp(dataF8, data70, offset);
*
* prolog = 32 - ((int) ptr_out);
* mask = 0x7fffffff >> HEXAGON_R_cl0_R(len);
* prolog = prolog & mask;
* kernel = len - prolog;
* epilog = kernel & 0x1F;
* kernel = kernel>>5;
*
* if (prolog & 1) { ptr_out[0] = (u8) data0; data0 >>= 8; ptr_out += 1;}
* ptr2 = (s16 *) &ptr_out[0];
* if (prolog & 2) { ptr2[0] = (u16) data0; data0 >>= 16; ptr_out += 2;}
* ptr4 = (s32 *) &ptr_out[0];
* if (prolog & 4) { ptr4[0] = (u32) data0; data0 >>= 32; ptr_out += 4;}
*
* offset = offset + (prolog & 7);
* if (offset >= 8) {
* data70 = dataF8;
* dataF8 = *ptr8_in++;
* }
* offset = offset & 0x7;
*
* prolog = prolog >> 3;
* if (prolog) for (i=0; i < prolog; i++) {
* data0 = HEXAGON_P_valignb_PPp(dataF8, data70, offset);
* ptr8_out = (s64 *) &ptr_out[0]; *ptr8_out = data0; ptr_out += 8;
* data70 = dataF8;
* dataF8 = *ptr8_in++;
* }
* if(kernel) { kernel -= 1; epilog += 32; }
* if(kernel) for(i=0; i < kernel; i++) {
* data0 = HEXAGON_P_valignb_PPp(dataF8, data70, offset);
* ptr8_out = (s64 *) &ptr_out[0]; *ptr8_out = data0; ptr_out += 8;
* data70 = *ptr8_in++;
*
* data0 = HEXAGON_P_valignb_PPp(data70, dataF8, offset);
* ptr8_out = (s64 *) &ptr_out[0]; *ptr8_out = data0; ptr_out += 8;
* dataF8 = *ptr8_in++;
*
* data0 = HEXAGON_P_valignb_PPp(dataF8, data70, offset);
* ptr8_out = (s64 *) &ptr_out[0]; *ptr8_out = data0; ptr_out += 8;
* data70 = *ptr8_in++;
*
* data0 = HEXAGON_P_valignb_PPp(data70, dataF8, offset);
* ptr8_out = (s64 *) &ptr_out[0]; *ptr8_out = data0; ptr_out += 8;
* dataF8 = *ptr8_in++;
* }
* epilogdws = epilog >> 3;
* if (epilogdws) for (i=0; i < epilogdws; i++) {
* data0 = HEXAGON_P_valignb_PPp(dataF8, data70, offset);
* ptr8_out = (s64 *) &ptr_out[0]; *ptr8_out = data0; ptr_out += 8;
* data70 = dataF8;
* dataF8 = *ptr8_in++;
* }
* data0 = HEXAGON_P_valignb_PPp(dataF8, data70, offset);
*
* ptr4 = (s32 *) &ptr_out[0];
* if (epilog & 4) { ptr4[0] = (u32) data0; data0 >>= 32; ptr_out += 4;}
* ptr2 = (s16 *) &ptr_out[0];
* if (epilog & 2) { ptr2[0] = (u16) data0; data0 >>= 16; ptr_out += 2;}
* if (epilog & 1) { *ptr_out++ = (u8) data0; }
*
* return(ptr_out - len);
* }
*
* Codesize : 784 bytes
*/
#define ptr_out R0 /* destination pointer */
#define ptr_in R1 /* source pointer */
#define len R2 /* length of copy in bytes */
#define data70 R13:12 /* lo 8 bytes of non-aligned transfer */
#define dataF8 R11:10 /* hi 8 bytes of non-aligned transfer */
#define ldata0 R7:6 /* even 8 bytes chunks */
#define ldata1 R25:24 /* odd 8 bytes chunks */
#define data1 R7 /* lower 8 bytes of ldata1 */
#define data0 R6 /* lower 8 bytes of ldata0 */
#define ifbyte p0 /* if transfer has bytes in epilog/prolog */
#define ifhword p0 /* if transfer has shorts in epilog/prolog */
#define ifword p0 /* if transfer has words in epilog/prolog */
#define noprolog p0 /* no prolog, xfer starts at 32byte */
#define nokernel p1 /* no 32byte multiple block in the transfer */
#define noepilog p0 /* no epilog, xfer ends on 32byte boundary */
#define align p2 /* alignment of input rel to 8byte boundary */
#define kernel1 p0 /* kernel count == 1 */
#define dalign R25 /* rel alignment of input to output data */
#define star3 R16 /* number bytes in prolog - dwords */
#define rest R8 /* length - prolog bytes */
#define back R7 /* nr bytes > dword boundary in src block */
#define epilog R3 /* bytes in epilog */
#define inc R15:14 /* inc kernel by -1 and defetch ptr by 32 */
#define kernel R4 /* number of 32byte chunks in kernel */
#define ptr_in_p_128 R5 /* pointer for prefetch of input data */
#define mask R8 /* mask used to determine prolog size */
#define shift R8 /* used to work a shifter to extract bytes */
#define shift2 R5 /* in epilog to work shifter to extract bytes */
#define prolog R15 /* bytes in prolog */
#define epilogdws R15 /* number dwords in epilog */
#define shiftb R14 /* used to extract bytes */
#define offset R9 /* same as align in reg */
#define ptr_out_p_32 R17 /* pointer to output dczero */
#define align888 R14 /* if simple dword loop can be used */
#define len8 R9 /* number of dwords in length */
#define over R20 /* nr of bytes > last inp buf dword boundary */
#define ptr_in_p_128kernel R5:4 /* packed fetch pointer & kernel cnt */
.section .text
.p2align 4
.global memcpy
.type memcpy, @function
memcpy:
{
p2 = cmp.eq(len, #0); /* =0 */
align888 = or(ptr_in, ptr_out); /* %8 < 97 */
p0 = cmp.gtu(len, #23); /* %1, <24 */
p1 = cmp.eq(ptr_in, ptr_out); /* attempt to overwrite self */
}
{
p1 = or(p2, p1);
p3 = cmp.gtu(len, #95); /* %8 < 97 */
align888 = or(align888, len); /* %8 < 97 */
len8 = lsr(len, #3); /* %8 < 97 */
}
{
dcfetch(ptr_in); /* zero/ptrin=ptrout causes fetch */
p2 = bitsclr(align888, #7); /* %8 < 97 */
if(p1) jumpr r31; /* =0 */
}
{
p2 = and(p2,!p3); /* %8 < 97 */
if (p2.new) len = add(len, #-8); /* %8 < 97 */
if (p2.new) jump:NT .Ldwordaligned; /* %8 < 97 */
}
{
if(!p0) jump .Lbytes23orless; /* %1, <24 */
mask.l = #LO(0x7fffffff);
/* all bytes before line multiples of data */
prolog = sub(#0, ptr_out);
}
{
/* save r31 on stack, decrement sp by 16 */
allocframe(#24);
mask.h = #HI(0x7fffffff);
ptr_in_p_128 = add(ptr_in, #32);
back = cl0(len);
}
{
memd(sp+#0) = R17:16; /* save r16,r17 on stack */
r31.l = #LO(.Lmemcpy_return); /* set up final return pointer */
prolog &= lsr(mask, back);
offset = and(ptr_in, #7);
}
{
memd(sp+#8) = R25:24; /* save r25,r24 on stack */
dalign = sub(ptr_out, ptr_in);
r31.h = #HI(.Lmemcpy_return); /* set up final return pointer */
}
{
/* see if the input buffer end is aligned */
over = add(len, ptr_in);
back = add(len, offset);
memd(sp+#16) = R21:20; /* save r20,r21 on stack */
}
{
noprolog = bitsclr(prolog, #7);
prolog = and(prolog, #31);
dcfetch(ptr_in_p_128);
ptr_in_p_128 = add(ptr_in_p_128, #32);
}
{
kernel = sub(len, prolog);
shift = asl(prolog, #3);
star3 = and(prolog, #7);
ptr_in = and(ptr_in, #-8);
}
{
prolog = lsr(prolog, #3);
epilog = and(kernel, #31);
ptr_out_p_32 = add(ptr_out, prolog);
over = and(over, #7);
}
{
p3 = cmp.gtu(back, #8);
kernel = lsr(kernel, #5);
dcfetch(ptr_in_p_128);
ptr_in_p_128 = add(ptr_in_p_128, #32);
}
{
p1 = cmp.eq(prolog, #0);
if(!p1.new) prolog = add(prolog, #1);
dcfetch(ptr_in_p_128); /* reserve the line 64bytes on */
ptr_in_p_128 = add(ptr_in_p_128, #32);
}
{
nokernel = cmp.eq(kernel,#0);
dcfetch(ptr_in_p_128); /* reserve the line 64bytes on */
ptr_in_p_128 = add(ptr_in_p_128, #32);
shiftb = and(shift, #8);
}
{
dcfetch(ptr_in_p_128); /* reserve the line 64 bytes on */
ptr_in_p_128 = add(ptr_in_p_128, #32);
if(nokernel) jump .Lskip64;
p2 = cmp.eq(kernel, #1); /* don't advance below if kernel == 1 */
}
{
dczeroa(ptr_out_p_32);
/* don't advance pointer */
if(!p2) ptr_out_p_32 = add(ptr_out_p_32, #32);
}
{
dalign = and(dalign, #31);
dczeroa(ptr_out_p_32);
}
.Lskip64:
{
data70 = memd(ptr_in++#16);
if(p3) dataF8 = memd(ptr_in+#8);
if(noprolog) jump .Lnoprolog32;
align = offset;
}
/* up to initial 7 bytes */
{
ldata0 = valignb(dataF8, data70, align);
ifbyte = tstbit(shift,#3);
offset = add(offset, star3);
}
{
if(ifbyte) memb(ptr_out++#1) = data0;
ldata0 = lsr(ldata0, shiftb);
shiftb = and(shift, #16);
ifhword = tstbit(shift,#4);
}
{
if(ifhword) memh(ptr_out++#2) = data0;
ldata0 = lsr(ldata0, shiftb);
ifword = tstbit(shift,#5);
p2 = cmp.gtu(offset, #7);
}
{
if(ifword) memw(ptr_out++#4) = data0;
if(p2) data70 = dataF8;
if(p2) dataF8 = memd(ptr_in++#8); /* another 8 bytes */
align = offset;
}
.Lnoprolog32:
{
p3 = sp1loop0(.Ldword_loop_prolog, prolog)
rest = sub(len, star3); /* what's left after the loop */
p0 = cmp.gt(over, #0);
}
if(p0) rest = add(rest, #16);
.Ldword_loop_prolog:
{
if(p3) memd(ptr_out++#8) = ldata0;
ldata0 = valignb(dataF8, data70, align);
p0 = cmp.gt(rest, #16);
}
{
data70 = dataF8;
if(p0) dataF8 = memd(ptr_in++#8);
rest = add(rest, #-8);
}:endloop0
.Lkernel:
{
/* kernel is at least 32 bytes */
p3 = cmp.gtu(kernel, #0);
/* last iteration: remove edge effects */
if(p3.new) kernel = add(kernel, #-1);
/* dealt with in last dword loop */
if(p3.new) epilog = add(epilog, #32);
}
{
nokernel = cmp.eq(kernel, #0); /* after adjustment, recheck */
if(nokernel.new) jump:NT .Lepilog; /* likely not taken */
inc = combine(#32, #-1);
p3 = cmp.gtu(dalign, #24);
}
{
if(p3) jump .Lodd_alignment;
}
{
loop0(.Loword_loop_25to31, kernel);
kernel1 = cmp.gtu(kernel, #1);
rest = kernel;
}
.falign
.Loword_loop_25to31:
{
dcfetch(ptr_in_p_128); /* prefetch 4 lines ahead */
if(kernel1) ptr_out_p_32 = add(ptr_out_p_32, #32);
}
{
dczeroa(ptr_out_p_32); /* reserve the next 32 bytes in cache */
p3 = cmp.eq(kernel, rest);
}
{
/* kernel -= 1 */
ptr_in_p_128kernel = vaddw(ptr_in_p_128kernel, inc);
/* kill write on first iteration */
if(!p3) memd(ptr_out++#8) = ldata1;
ldata1 = valignb(dataF8, data70, align);
data70 = memd(ptr_in++#8);
}
{
memd(ptr_out++#8) = ldata0;
ldata0 = valignb(data70, dataF8, align);
dataF8 = memd(ptr_in++#8);
}
{
memd(ptr_out++#8) = ldata1;
ldata1 = valignb(dataF8, data70, align);
data70 = memd(ptr_in++#8);
}
{
memd(ptr_out++#8) = ldata0;
ldata0 = valignb(data70, dataF8, align);
dataF8 = memd(ptr_in++#8);
kernel1 = cmp.gtu(kernel, #1);
}:endloop0
{
memd(ptr_out++#8) = ldata1;
jump .Lepilog;
}
.Lodd_alignment:
{
loop0(.Loword_loop_00to24, kernel);
kernel1 = cmp.gtu(kernel, #1);
rest = add(kernel, #-1);
}
.falign
.Loword_loop_00to24:
{
dcfetch(ptr_in_p_128); /* prefetch 4 lines ahead */
ptr_in_p_128kernel = vaddw(ptr_in_p_128kernel, inc);
if(kernel1) ptr_out_p_32 = add(ptr_out_p_32, #32);
}
{
dczeroa(ptr_out_p_32); /* reserve the next 32 bytes in cache */
}
{
memd(ptr_out++#8) = ldata0;
ldata0 = valignb(dataF8, data70, align);
data70 = memd(ptr_in++#8);
}
{
memd(ptr_out++#8) = ldata0;
ldata0 = valignb(data70, dataF8, align);
dataF8 = memd(ptr_in++#8);
}
{
memd(ptr_out++#8) = ldata0;
ldata0 = valignb(dataF8, data70, align);
data70 = memd(ptr_in++#8);
}
{
memd(ptr_out++#8) = ldata0;
ldata0 = valignb(data70, dataF8, align);
dataF8 = memd(ptr_in++#8);
kernel1 = cmp.gtu(kernel, #1);
}:endloop0
.Lepilog:
{
noepilog = cmp.eq(epilog,#0);
epilogdws = lsr(epilog, #3);
kernel = and(epilog, #7);
}
{
if(noepilog) jumpr r31;
if(noepilog) ptr_out = sub(ptr_out, len);
p3 = cmp.eq(epilogdws, #0);
shift2 = asl(epilog, #3);
}
{
shiftb = and(shift2, #32);
ifword = tstbit(epilog,#2);
if(p3) jump .Lepilog60;
if(!p3) epilog = add(epilog, #-16);
}
{
loop0(.Ldword_loop_epilog, epilogdws);
/* stop criterion is the 3 LSBs; if they're 0, use 8 */
p3 = cmp.eq(kernel, #0);
if(p3.new) kernel= #8;
p1 = cmp.gt(over, #0);
}
/* if not aligned to the end of the buffer, execute 1 more iteration */
if(p1) kernel= #0;
.Ldword_loop_epilog:
{
memd(ptr_out++#8) = ldata0;
ldata0 = valignb(dataF8, data70, align);
p3 = cmp.gt(epilog, kernel);
}
{
data70 = dataF8;
if(p3) dataF8 = memd(ptr_in++#8);
epilog = add(epilog, #-8);
}:endloop0
/* copy last 7 bytes */
.Lepilog60:
{
if(ifword) memw(ptr_out++#4) = data0;
ldata0 = lsr(ldata0, shiftb);
ifhword = tstbit(epilog,#1);
shiftb = and(shift2, #16);
}
{
if(ifhword) memh(ptr_out++#2) = data0;
ldata0 = lsr(ldata0, shiftb);
ifbyte = tstbit(epilog,#0);
if(ifbyte.new) len = add(len, #-1);
}
{
if(ifbyte) memb(ptr_out) = data0;
ptr_out = sub(ptr_out, len); /* return dest pointer */
jumpr r31;
}
/* do byte copy for small n */
.Lbytes23orless:
{
p3 = sp1loop0(.Lbyte_copy, len);
len = add(len, #-1);
}
.Lbyte_copy:
{
data0 = memb(ptr_in++#1);
if(p3) memb(ptr_out++#1) = data0;
}:endloop0
{
memb(ptr_out) = data0;
ptr_out = sub(ptr_out, len);
jumpr r31;
}
/* do dword copies for aligned in, out and length */
.Ldwordaligned:
{
p3 = sp1loop0(.Ldword_copy, len8);
}
.Ldword_copy:
{
if(p3) memd(ptr_out++#8) = ldata0;
ldata0 = memd(ptr_in++#8);
}:endloop0
{
memd(ptr_out) = ldata0;
ptr_out = sub(ptr_out, len);
jumpr r31; /* return to function caller */
}
.Lmemcpy_return:
r21:20 = memd(sp+#16); /* restore r20+r21 */
{
r25:24 = memd(sp+#8); /* restore r24+r25 */
r17:16 = memd(sp+#0); /* restore r16+r17 */
}
deallocframe; /* restore r31 and deallocate the stack frame */
jumpr r31;
/*
* Copyright (c) 2011 Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
/* HEXAGON assembly optimized memset */
/* Replaces the standard library function memset */
.macro HEXAGON_OPT_FUNC_BEGIN name
.text
.p2align 4
.globl \name
.type \name, @function
\name:
.endm
.macro HEXAGON_OPT_FUNC_FINISH name
.size \name, . - \name
.endm
/* FUNCTION: memset (v2 version) */
#if __HEXAGON_ARCH__ < 3
HEXAGON_OPT_FUNC_BEGIN memset
{
r6 = #8
r7 = extractu(r0, #3 , #0)
p0 = cmp.eq(r2, #0)
p1 = cmp.gtu(r2, #7)
}
{
r4 = vsplatb(r1)
r8 = r0 /* leave r0 intact for return val */
r9 = sub(r6, r7) /* bytes until double alignment */
if p0 jumpr r31 /* count == 0, so return */
}
{
r3 = #0
r7 = #0
p0 = tstbit(r9, #0)
if p1 jump 2f /* skip byte loop */
}
/* less than 8 bytes to set, so just set a byte at a time and return */
loop0(1f, r2) /* byte loop */
.falign
1: /* byte loop */
{
memb(r8++#1) = r4
}:endloop0
jumpr r31
.falign
2: /* skip byte loop */
{
r6 = #1
p0 = tstbit(r9, #1)
p1 = cmp.eq(r2, #1)
if !p0 jump 3f /* skip initial byte store */
}
{
memb(r8++#1) = r4
r3:2 = sub(r3:2, r7:6)
if p1 jumpr r31
}
.falign
3: /* skip initial byte store */
{
r6 = #2
p0 = tstbit(r9, #2)
p1 = cmp.eq(r2, #2)
if !p0 jump 4f /* skip initial half store */
}
{
memh(r8++#2) = r4
r3:2 = sub(r3:2, r7:6)
if p1 jumpr r31
}
.falign
4: /* skip initial half store */
{
r6 = #4
p0 = cmp.gtu(r2, #7)
p1 = cmp.eq(r2, #4)
if !p0 jump 5f /* skip initial word store */
}
{
memw(r8++#4) = r4
r3:2 = sub(r3:2, r7:6)
p0 = cmp.gtu(r2, #11)
if p1 jumpr r31
}
.falign
5: /* skip initial word store */
{
r10 = lsr(r2, #3)
p1 = cmp.eq(r3, #1)
if !p0 jump 7f /* skip double loop */
}
{
r5 = r4
r6 = #8
loop0(6f, r10) /* double loop */
}
/* set bytes a double word at a time */
.falign
6: /* double loop */
{
memd(r8++#8) = r5:4
r3:2 = sub(r3:2, r7:6)
p1 = cmp.eq(r2, #8)
}:endloop0
.falign
7: /* skip double loop */
{
p0 = tstbit(r2, #2)
if p1 jumpr r31
}
{
r6 = #4
p0 = tstbit(r2, #1)
p1 = cmp.eq(r2, #4)
if !p0 jump 8f /* skip final word store */
}
{
memw(r8++#4) = r4
r3:2 = sub(r3:2, r7:6)
if p1 jumpr r31
}
.falign
8: /* skip final word store */
{
p1 = cmp.eq(r2, #2)
if !p0 jump 9f /* skip final half store */
}
{
memh(r8++#2) = r4
if p1 jumpr r31
}
.falign
9: /* skip final half store */
{
memb(r8++#1) = r4
jumpr r31
}
HEXAGON_OPT_FUNC_FINISH memset
#endif
/* FUNCTION: memset (v3 and higher version) */
#if __HEXAGON_ARCH__ >= 3
HEXAGON_OPT_FUNC_BEGIN memset
{
r7=vsplatb(r1)
r6 = r0
if (r2==#0) jump:nt .L1
}
{
r5:4=combine(r7,r7)
p0 = cmp.gtu(r2,#8)
if (p0.new) jump:nt .L3
}
{
r3 = r0
loop0(.L47,r2)
}
.falign
.L47:
{
memb(r3++#1) = r1
}:endloop0 /* start=.L47 */
jumpr r31
.L3:
{
p0 = tstbit(r0,#0)
if (!p0.new) jump:nt .L8
p1 = cmp.eq(r2, #1)
}
{
r6 = add(r0, #1)
r2 = add(r2,#-1)
memb(r0) = r1
if (p1) jump .L1
}
.L8:
{
p0 = tstbit(r6,#1)
if (!p0.new) jump:nt .L10
}
{
r2 = add(r2,#-2)
memh(r6++#2) = r7
p0 = cmp.eq(r2, #2)
if (p0.new) jump:nt .L1
}
.L10:
{
p0 = tstbit(r6,#2)
if (!p0.new) jump:nt .L12
}
{
r2 = add(r2,#-4)
memw(r6++#4) = r7
p0 = cmp.eq(r2, #4)
if (p0.new) jump:nt .L1
}
.L12:
{
p0 = cmp.gtu(r2,#127)
if (!p0.new) jump:nt .L14
}
r3 = and(r6,#31)
if (r3==#0) jump:nt .L17
{
memd(r6++#8) = r5:4
r2 = add(r2,#-8)
}
r3 = and(r6,#31)
if (r3==#0) jump:nt .L17
{
memd(r6++#8) = r5:4
r2 = add(r2,#-8)
}
r3 = and(r6,#31)
if (r3==#0) jump:nt .L17
{
memd(r6++#8) = r5:4
r2 = add(r2,#-8)
}
.L17:
{
r3 = lsr(r2,#5)
if (r1!=#0) jump:nt .L18
}
{
r8 = r3
r3 = r6
loop0(.L46,r3)
}
.falign
.L46:
{
dczeroa(r6)
r6 = add(r6,#32)
r2 = add(r2,#-32)
}:endloop0 /* start=.L46 */
.L14:
{
p0 = cmp.gtu(r2,#7)
if (!p0.new) jump:nt .L28
r8 = lsr(r2,#3)
}
loop0(.L44,r8)
.falign
.L44:
{
memd(r6++#8) = r5:4
r2 = add(r2,#-8)
}:endloop0 /* start=.L44 */
.L28:
{
p0 = tstbit(r2,#2)
if (!p0.new) jump:nt .L33
}
{
r2 = add(r2,#-4)
memw(r6++#4) = r7
}
.L33:
{
p0 = tstbit(r2,#1)
if (!p0.new) jump:nt .L35
}
{
r2 = add(r2,#-2)
memh(r6++#2) = r7
}
.L35:
p0 = cmp.eq(r2,#1)
if (p0) memb(r6) = r1
.L1:
jumpr r31
.L18:
loop0(.L45,r3)
.falign
.L45:
dczeroa(r6)
{
memd(r6++#8) = r5:4
r2 = add(r2,#-32)
}
memd(r6++#8) = r5:4
memd(r6++#8) = r5:4
{
memd(r6++#8) = r5:4
}:endloop0 /* start=.L45 */
jump .L14
HEXAGON_OPT_FUNC_FINISH memset
#endif
#
# Makefile for Hexagon memory management subsystem
#
obj-y := init.o pgalloc.o ioremap.o uaccess.o vm_fault.o cache.o
obj-y += copy_to_user.o copy_from_user.o strnlen_user.o vm_tlb.o
/*
* Cache management functions for Hexagon
*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#include <linux/mm.h>
#include <asm/cacheflush.h>
#include <asm/hexagon_vm.h>
#define spanlines(start, end) \
(((end - (start & ~(LINESIZE - 1))) >> LINEBITS) + 1)
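/*
 * Worked example (assuming 32-byte lines, i.e. LINESIZE 32 and
 * LINEBITS 5): for a range whose first byte is 0x1010 and last byte
 * is 0x104f, spanlines(0x1010, 0x104f) = ((0x104f - 0x1000) >> 5) + 1
 * = 3 lines.
 */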
void flush_dcache_range(unsigned long start, unsigned long end)
{
unsigned long lines = spanlines(start, end-1);
unsigned long i, flags;
start &= ~(LINESIZE - 1);
local_irq_save(flags);
for (i = 0; i < lines; i++) {
__asm__ __volatile__ (
" dccleaninva(%0); "
:
: "r" (start)
);
start += LINESIZE;
}
local_irq_restore(flags);
}
void flush_icache_range(unsigned long start, unsigned long end)
{
unsigned long lines = spanlines(start, end-1);
unsigned long i, flags;
start &= ~(LINESIZE - 1);
local_irq_save(flags);
for (i = 0; i < lines; i++) {
__asm__ __volatile__ (
" dccleana(%0); "
" icinva(%0); "
:
: "r" (start)
);
start += LINESIZE;
}
__asm__ __volatile__ (
"isync"
);
local_irq_restore(flags);
}
void hexagon_clean_dcache_range(unsigned long start, unsigned long end)
{
unsigned long lines = spanlines(start, end-1);
unsigned long i, flags;
start &= ~(LINESIZE - 1);
local_irq_save(flags);
for (i = 0; i < lines; i++) {
__asm__ __volatile__ (
" dccleana(%0); "
:
: "r" (start)
);
start += LINESIZE;
}
local_irq_restore(flags);
}
void hexagon_inv_dcache_range(unsigned long start, unsigned long end)
{
unsigned long lines = spanlines(start, end-1);
unsigned long i, flags;
start &= ~(LINESIZE - 1);
local_irq_save(flags);
for (i = 0; i < lines; i++) {
__asm__ __volatile__ (
" dcinva(%0); "
:
: "r" (start)
);
start += LINESIZE;
}
local_irq_restore(flags);
}
/*
* This is just really brutal and shouldn't be used anyway,
* especially on V2. Left here just in case.
*/
void flush_cache_all_hexagon(void)
{
unsigned long flags;
local_irq_save(flags);
__vmcache_ickill();
__vmcache_dckill();
__vmcache_l2kill();
local_irq_restore(flags);
mb();
}
/*
* User memory copy functions for kernel
*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
/*
* The right way to do this involves valignb.
* The easy way only speeds up the case where src and dest have
* similar alignment.
*/
/*
* Copy to/from user are the same, except that for packets with a load and
* a store, I don't know how to tell which kind of exception we got.
* Therefore, we duplicate the function, and handle faulting addresses
* differently for each function
*/
/*
* copy from user: loads can fault
*/
#define src_sav r13
#define dst_sav r12
#define src_dst_sav r13:12
#define d_dbuf r15:14
#define w_dbuf r15
#define dst r0
#define src r1
#define bytes r2
#define loopcount r5
#define FUNCNAME __copy_from_user_hexagon
#include "copy_user_template.S"
/* LOAD FAULTS from COPY_FROM_USER */
/* Alignment loop. r2 has been updated. Return it. */
.falign
1009:
2009:
4009:
{
r0 = r2
jumpr r31
}
/* Normal copy loops. Do epilog. Use src-src_sav to compute distance */
/* X - (A - B) == X + B - A */
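/* Worked example: if bytes (r2) was 32 and src has advanced 8 past
 * src_sav when the fault hits, r2 + (src_sav - src) = 32 - 8 = 24
 * bytes remain uncopied, and that is what we return. */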
.falign
8089:
{
memd(dst) = d_dbuf
r2 += sub(src_sav,src)
}
{
r0 = r2
jumpr r31
}
.falign
4089:
{
memw(dst) = w_dbuf
r2 += sub(src_sav,src)
}
{
r0 = r2
jumpr r31
}
.falign
2089:
{
memh(dst) = w_dbuf
r2 += sub(src_sav,src)
}
{
r0 = r2
jumpr r31
}
.falign
1089:
{
memb(dst) = w_dbuf
r2 += sub(src_sav,src)
}
{
r0 = r2
jumpr r31
}
/* COPY FROM USER: only loads can fail */
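/* Each pair below maps a faulting load to its fixup: e.g. 1000b, the
 * byte load in the alignment preamble, is fixed up at 1009b, which
 * returns the count of bytes not yet copied in r0. */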
.section __ex_table,"a"
.long 1000b,1009b
.long 2000b,2009b
.long 4000b,4009b
.long 8080b,8089b
.long 4080b,4089b
.long 2080b,2089b
.long 1080b,1089b
.previous
/*
* User memory copying routines for the Hexagon Kernel
*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
/* The right way to do this involves valignb.
* The easy way only speeds up the case where src and dest have
* similar alignment.
*/
/*
* Copy to/from user are the same, except that for packets with a load and
* a store, I don't know how to tell which kind of exception we got.
* Therefore, we duplicate the function, and handle faulting addresses
* differently for each function
*/
/*
* copy to user: stores can fault
*/
#define src_sav r13
#define dst_sav r12
#define src_dst_sav r13:12
#define d_dbuf r15:14
#define w_dbuf r15
#define dst r0
#define src r1
#define bytes r2
#define loopcount r5
#define FUNCNAME __copy_to_user_hexagon
#include "copy_user_template.S"
/* STORE FAULTS from COPY_TO_USER */
.falign
1109:
2109:
4109:
/* Alignment loop. r2 has been updated. Return it. */
{
r0 = r2
jumpr r31
}
/* Normal copy loops. Use dst-dst_sav to compute distance */
/* dst holds best write, no need to unwind any loops */
/* X - (A - B) == X + B - A */
.falign
8189:
8199:
4189:
4199:
2189:
2199:
1189:
1199:
{
r2 += sub(dst_sav,dst)
}
{
r0 = r2
jumpr r31
}
/* COPY TO USER: only stores can fail */
.section __ex_table,"a"
.long 1100b,1109b
.long 2100b,2109b
.long 4100b,4109b
.long 8180b,8189b
.long 8190b,8199b
.long 4180b,4189b
.long 4190b,4199b
.long 2180b,2189b
.long 2190b,2199b
.long 1180b,1189b
.long 1190b,1199b
.previous
/*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
/* Numerology:
* WXYZ
* W: width in bytes
* X: Load=0, Store=1
* Y: location: 0=preamble, 8=loop, 9=epilog
* Z: 0=the instruction itself, 9=its fault handler
*/
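/* Example: 4180 is a 4-byte (W=4) store (X=1) in the main loop (Y=8)
 * at the instruction itself (Z=0); its fault handler in the including
 * file is 4189. */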
.text
.global FUNCNAME
.type FUNCNAME, @function
.p2align 5
FUNCNAME:
{
p0 = cmp.gtu(bytes,#0)
if (!p0.new) jump:nt .Ldone
r3 = or(dst,src)
r4 = xor(dst,src)
}
{
p1 = cmp.gtu(bytes,#15)
p0 = bitsclr(r3,#7)
if (!p0.new) jump:nt .Loop_not_aligned_8
src_dst_sav = combine(src,dst)
}
{
loopcount = lsr(bytes,#3)
if (!p1) jump .Lsmall
}
p3=sp1loop0(.Loop8,loopcount)
.Loop8:
8080:
8180:
{
if (p3) memd(dst++#8) = d_dbuf
d_dbuf = memd(src++#8)
}:endloop0
8190:
{
memd(dst++#8) = d_dbuf
bytes -= asl(loopcount,#3)
jump .Lsmall
}
.Loop_not_aligned_8:
{
p0 = bitsclr(r4,#7)
if (p0.new) jump:nt .Lalign
}
{
p0 = bitsclr(r3,#3)
if (!p0.new) jump:nt .Loop_not_aligned_4
p1 = cmp.gtu(bytes,#7)
}
{
if (!p1) jump .Lsmall
loopcount = lsr(bytes,#2)
}
p3=sp1loop0(.Loop4,loopcount)
.Loop4:
4080:
4180:
{
if (p3) memw(dst++#4) = w_dbuf
w_dbuf = memw(src++#4)
}:endloop0
4190:
{
memw(dst++#4) = w_dbuf
bytes -= asl(loopcount,#2)
jump .Lsmall
}
.Loop_not_aligned_4:
{
p0 = bitsclr(r3,#1)
if (!p0.new) jump:nt .Loop_not_aligned
p1 = cmp.gtu(bytes,#3)
}
{
if (!p1) jump .Lsmall
loopcount = lsr(bytes,#1)
}
p3=sp1loop0(.Loop2,loopcount)
.Loop2:
2080:
2180:
{
if (p3) memh(dst++#2) = w_dbuf
w_dbuf = memuh(src++#2)
}:endloop0
2190:
{
memh(dst++#2) = w_dbuf
bytes -= asl(loopcount,#1)
jump .Lsmall
}
.Loop_not_aligned: /* Works for as small as one byte */
p3=sp1loop0(.Loop1,bytes)
.Loop1:
1080:
1180:
{
if (p3) memb(dst++#1) = w_dbuf
w_dbuf = memub(src++#1)
}:endloop0
/* Done */
1190:
{
memb(dst) = w_dbuf
jumpr r31
r0 = #0
}
.Lsmall:
{
p0 = cmp.gtu(bytes,#0)
if (p0.new) jump:nt .Loop_not_aligned
}
.Ldone:
{
r0 = #0
jumpr r31
}
.falign
.Lalign:
1000:
{
if (p0.new) w_dbuf = memub(src)
p0 = tstbit(src,#0)
if (!p1) jump .Lsmall
}
1100:
{
if (p0) memb(dst++#1) = w_dbuf
if (p0) bytes = add(bytes,#-1)
if (p0) src = add(src,#1)
}
2000:
{
if (p0.new) w_dbuf = memuh(src)
p0 = tstbit(src,#1)
if (!p1) jump .Lsmall
}
2100:
{
if (p0) memh(dst++#2) = w_dbuf
if (p0) bytes = add(bytes,#-2)
if (p0) src = add(src,#2)
}
4000:
{
if (p0.new) w_dbuf = memw(src)
p0 = tstbit(src,#2)
if (!p1) jump .Lsmall
}
4100:
{
if (p0) memw(dst++#4) = w_dbuf
if (p0) bytes = add(bytes,#-4)
if (p0) src = add(src,#4)
jump FUNCNAME
}
.size FUNCNAME,.-FUNCNAME
/*
* Memory subsystem initialization for Hexagon
*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <asm/atomic.h>
#include <linux/highmem.h>
#include <asm/tlb.h>
#include <asm/sections.h>
#include <asm/vm_mmu.h>
/*
* Define a startpg just past the end of the kernel image and a lastpg
* that corresponds to the end of real or simulated platform memory.
*/
#define bootmem_startpg (PFN_UP(((unsigned long) _end) - PAGE_OFFSET))
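/* e.g. (illustrative, assuming 4K pages): if the image ends 4MB past
 * PAGE_OFFSET, bootmem_startpg = PFN_UP(0x400000) = 0x400. */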
unsigned long bootmem_lastpg; /* Should be set by platform code */
/* Set as variable to limit PMD copies */
int max_kernel_seg = 0x303;
/* I think this should be (PAGE_SIZE-1), the way it's used... */
unsigned long zero_page_mask;
/* indicate PFNs of high memory */
unsigned long highstart_pfn, highend_pfn;
/* struct mmu_gather is defined in asm-generic/tlb.h */
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
/* Default cache attribute for newly created page tables */
unsigned long _dflt_cache_att = CACHEDEF;
/*
* The current "generation" of kernel map, which should not roll
* over until Hell freezes over. Actual bound in years needs to be
* calculated to confirm.
*/
DEFINE_SPINLOCK(kmap_gen_lock);
/* checkpatch says don't init this to 0. */
unsigned long long kmap_generation;
/*
* mem_init - initializes memory
*
* Frees up bootmem
* Fixes up more stuff for HIGHMEM
* Calculates and displays memory available/used
*/
void __init mem_init(void)
{
/* No idea where this is actually declared. Seems to evade LXR. */
totalram_pages += free_all_bootmem();
num_physpages = bootmem_lastpg; /* seriously, what? */
printk(KERN_INFO "totalram_pages = %ld\n", totalram_pages);
/*
* To-Do: someone somewhere should wipe out the bootmem map
* after we're done?
*/
/*
* This can be moved to some more virtual-memory-specific
* initialization hook at some point. Set the init_mm
* descriptors "context" value to point to the initial
* kernel segment table's physical address.
*/
init_mm.context.ptbase = __pa(init_mm.pgd);
}
/*
* free_initmem - frees memory used by stuff declared with __init
*
* Todo: free pages between __init_begin and __init_end; possibly
* some devtree related stuff as well.
*/
void __init_refok free_initmem(void)
{
}
/*
* free_initrd_mem - frees... initrd memory.
* @start - start of init memory
* @end - end of init memory
*
* Apparently has to be passed the address of the initrd memory.
*
* Wrapped by #ifdef CONFIG_BLKDEV_INITRD
*/
void free_initrd_mem(unsigned long start, unsigned long end)
{
}
void sync_icache_dcache(pte_t pte)
{
unsigned long addr;
struct page *page;
page = pte_page(pte);
addr = (unsigned long) page_address(page);
__vmcache_idsync(addr, PAGE_SIZE);
}
/*
* In order to set up page allocator "nodes",
* somebody has to call free_area_init() for UMA.
*
* In this mode, we only have one pg_data_t
* structure: contig_mem_data.
*/
void __init paging_init(void)
{
unsigned long zones_sizes[MAX_NR_ZONES] = {0, };
/*
* This is not particularly well documented anywhere, but
* give ZONE_NORMAL all the memory, including the big holes
* left by the kernel+bootmem_map, which are already marked reserved
* in the bootmem_map; free_area_init should see those bits and
* adjust accordingly.
*/
zones_sizes[ZONE_NORMAL] = max_low_pfn;
free_area_init(zones_sizes); /* sets up the zonelists and mem_map */
/*
* Start of high memory area. Will probably need something more
* fancy if we... get more fancy.
*/
high_memory = (void *)((bootmem_lastpg + 1) << PAGE_SHIFT);
}
#ifndef DMA_RESERVE
#define DMA_RESERVE (4)
#endif
#define DMA_CHUNKSIZE (1<<22)
#define DMA_RESERVED_BYTES (DMA_RESERVE * DMA_CHUNKSIZE)
/*
* Pick out the memory size. We look for mem=size,
* where size is "size[KkMm]"
*/
static int __init early_mem(char *p)
{
unsigned long size;
char *endp;
size = memparse(p, &endp);
bootmem_lastpg = PFN_DOWN(size);
return 0;
}
early_param("mem", early_mem);
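/* e.g. booting with "mem=128M" yields bootmem_lastpg =
 * PFN_DOWN(0x08000000) = 0x8000, assuming 4K pages. */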
size_t hexagon_coherent_pool_size = (size_t) (DMA_RESERVE << 22);
void __init setup_arch_memory(void)
{
int bootmap_size;
/* XXX Todo: this probably should be cleaned up */
u32 *segtable = (u32 *) &swapper_pg_dir[0];
u32 *segtable_end;
/*
* Set up boot memory allocator
*
* The Gorman book also talks about these functions.
* This needs to change for highmem setups.
*/
/* Memory size needs to be a multiple of 16M */
bootmem_lastpg = PFN_DOWN((bootmem_lastpg << PAGE_SHIFT) &
~((BIG_KERNEL_PAGE_SIZE) - 1));
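/* e.g. (illustrative, assuming 4K pages and a 16M big-page size):
 * bootmem_lastpg = 0x4ccc (~76MB) rounds down to 0x4000 (64MB). */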
/*
* Reserve the top DMA_RESERVE bytes of RAM for DMA (uncached)
* memory allocation
*/
bootmap_size = init_bootmem(bootmem_startpg, bootmem_lastpg -
PFN_DOWN(DMA_RESERVED_BYTES));
printk(KERN_INFO "bootmem_startpg: 0x%08lx\n", bootmem_startpg);
printk(KERN_INFO "bootmem_lastpg: 0x%08lx\n", bootmem_lastpg);
printk(KERN_INFO "bootmap_size: %d\n", bootmap_size);
printk(KERN_INFO "max_low_pfn: 0x%08lx\n", max_low_pfn);
/*
* The default VM page tables (will be) populated with
* VA=PA+PAGE_OFFSET mapping. We go in and invalidate entries
* higher than what we have memory for.
*/
/* this is pointer arithmetic; each entry covers 4MB */
segtable = segtable + (PAGE_OFFSET >> 22);
/* this actually only goes to the end of the first gig */
segtable_end = segtable + (1<<(30-22));
/* Move forward to the start of empty pages */
segtable += bootmem_lastpg >> (22-PAGE_SHIFT);
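/* e.g. (illustrative, assuming PAGE_OFFSET = 0xc0000000 and 4K
 * pages): the first kernel entry is segtable index 0x300, and 64MB
 * of RAM (bootmem_lastpg = 0x4000) advances us 16 entries past it. */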
{
int i;
for (i = 1 ; i <= DMA_RESERVE ; i++)
segtable[-i] = ((segtable[-i] & __HVM_PTE_PGMASK_4MB)
| __HVM_PTE_R | __HVM_PTE_W | __HVM_PTE_X
| __HEXAGON_C_UNC << 6
| __HVM_PDE_S_4MB);
}
printk(KERN_INFO "clearing segtable from %p to %p\n", segtable,
segtable_end);
while (segtable < (segtable_end-8))
*(segtable++) = __HVM_PDE_S_INVALID;
/* stop the pointer at the device I/O 4MB page */
printk(KERN_INFO "segtable = %p (should be equal to _K_io_map)\n",
segtable);
#if 0
/* Other half of the early device table from vm_init_segtable. */
printk(KERN_INFO "&_K_init_devicetable = 0x%08x\n",
(unsigned long) _K_init_devicetable-PAGE_OFFSET);
*segtable = ((u32) (unsigned long) _K_init_devicetable-PAGE_OFFSET) |
__HVM_PDE_S_4KB;
printk(KERN_INFO "*segtable = 0x%08x\n", *segtable);
#endif
/*
* Free all the memory that wasn't taken up by the bootmap, the DMA
* reserve, or kernel itself.
*/
free_bootmem(PFN_PHYS(bootmem_startpg)+bootmap_size,
PFN_PHYS(bootmem_lastpg - bootmem_startpg) - bootmap_size -
DMA_RESERVED_BYTES);
/*
* The bootmem allocator seemingly just lives to feed memory
* to the paging system
*/
printk(KERN_INFO "PAGE_SIZE=%lu\n", PAGE_SIZE);
paging_init(); /* See Gorman Book, 2.3 */
/*
* At this point, the page allocator is kind of initialized, but
* apparently no pages are available (just like with the bootmem
* allocator), and need to be freed themselves via mem_init(),
* which is called by start_kernel() later on in the process
*/
}
/*
* I/O remap functions for Hexagon
*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#include <linux/io.h>
#include <linux/vmalloc.h>
void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size)
{
unsigned long last_addr, addr;
unsigned long offset = phys_addr & ~PAGE_MASK;
struct vm_struct *area;
pgprot_t prot = __pgprot(_PAGE_PRESENT|_PAGE_READ|_PAGE_WRITE
|(__HEXAGON_C_DEV << 6));
last_addr = phys_addr + size - 1;
/* Wrapping not allowed */
if (!size || (last_addr < phys_addr))
return NULL;
/* Rounds up to next page size, including whole-page offset */
size = PAGE_ALIGN(offset + size);
area = get_vm_area(size, VM_IOREMAP);
if (!area)
return NULL;
addr = (unsigned long)area->addr;
if (ioremap_page_range(addr, addr+size, phys_addr, prot)) {
vunmap((void *)addr);
return NULL;
}
return (void __iomem *) (offset + addr);
}
void __iounmap(const volatile void __iomem *addr)
{
vunmap((void *) ((unsigned long) addr & PAGE_MASK));
}
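/*
 * Hypothetical usage sketch (DEV_PHYS_BASE and DEV_CTRL_OFFSET are
 * placeholders, not part of this port): map a 4K register page, poke
 * a register, then tear the mapping down.
 *
 * void __iomem *regs = ioremap_nocache(DEV_PHYS_BASE, 0x1000);
 * if (regs) {
 *	writel(1, regs + DEV_CTRL_OFFSET);
 *	__iounmap(regs);
 * }
 */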
/*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#include <linux/init.h>
void __init pgtable_cache_init(void)
{
}
/*
* User string length functions for kernel
*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#define isrc r0
#define max r1 /* Do not change! */
#define end r2
#define tmp1 r3
#define obo r6 /* off-by-one */
#define start r7
#define mod8 r8
#define dbuf r15:14
#define dcmp r13:12
/*
* The vector mask version of this turned out *really* badly.
* The hardware loop version also turned out *really* badly.
* Seems straight pointer arithmetic basically wins here.
*/
#define fname __strnlen_user
.text
.global fname
.type fname, @function
.p2align 5 /* why? */
fname:
{
mod8 = and(isrc,#7);
end = add(isrc,max);
start = isrc;
}
{
P0 = cmp.eq(mod8,#0);
mod8 = and(end,#7);
dcmp = #0;
if (P0.new) jump:t dw_loop; /* fire up the oven */
}
alignment_loop:
fail_1: {
tmp1 = memb(start++#1);
}
{
P0 = cmp.eq(tmp1,#0);
if (P0.new) jump:nt exit_found;
P1 = cmp.gtu(end,start);
mod8 = and(start,#7);
}
{
if (!P1) jump exit_error; /* hit the end */
P0 = cmp.eq(mod8,#0);
}
{
if (!P0) jump alignment_loop;
}
dw_loop:
fail_2: {
dbuf = memd(start);
obo = add(start,#1);
}
{
P0 = vcmpb.eq(dbuf,dcmp);
}
{
tmp1 = P0;
P0 = cmp.gtu(end,start);
}
{
tmp1 = ct0(tmp1);
mod8 = and(end,#7);
if (!P0) jump end_check;
}
{
P0 = cmp.eq(tmp1,#32);
if (!P0.new) jump:nt exit_found;
if (!P0.new) start = add(obo,tmp1);
}
{
start = add(start,#8);
jump dw_loop;
} /* might be nice to combine these jumps... */
end_check:
{
P0 = cmp.gt(tmp1,mod8);
if (P0.new) jump:nt exit_error; /* never found! */
start = add(obo,tmp1);
}
exit_found:
{
R0 = sub(start,isrc);
jumpr R31;
}
exit_error:
{
R0 = add(max,#1);
jumpr R31;
}
/* Uh, what does the "fixup" return here? */
.falign
fix_1:
{
R0 = #0;
jumpr R31;
}
.size fname,.-fname
.section __ex_table,"a"
.long fail_1,fix_1
.long fail_2,fix_1
.previous
/*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
/*
* Support for user memory access from kernel. This will
* probably be inlined for performance at some point, but
* for ease of debug, and to a lesser degree for code size,
* we implement here as subroutines.
*/
#include <linux/types.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
/*
* For clear_user(), exploit previously defined copy_to_user function
* and the fact that we've got a handy zero page defined in kernel/head.S
*
* dczero here would be even faster.
*/
__kernel_size_t __clear_user_hexagon(void __user *dest, unsigned long count)
{
long uncleared;
while (count > PAGE_SIZE) {
uncleared = __copy_to_user_hexagon(dest, &empty_zero_page,
PAGE_SIZE);
if (uncleared)
return count - (PAGE_SIZE - uncleared);
count -= PAGE_SIZE;
dest += PAGE_SIZE;
}
if (count)
count = __copy_to_user_hexagon(dest, &empty_zero_page, count);
return count;
}
unsigned long clear_user_hexagon(void __user *dest, unsigned long count)
{
if (!access_ok(VERIFY_WRITE, dest, count))
return count;
else
return __clear_user_hexagon(dest, count);
}
/*
* Memory fault handling for Hexagon
*
* Copyright (c) 2010-2011 Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
/*
* Page fault handling for the Hexagon Virtual Machine.
* Can also be called by a native port emulating the HVM
* exceptions.
*/
#include <asm/pgtable.h>
#include <asm/traps.h>
#include <asm/uaccess.h>
#include <linux/mm.h>
#include <linux/signal.h>
#include <linux/module.h>
#include <linux/hardirq.h>
/*
* Decode of hardware exception sends us to one of several
* entry points. At each, we generate canonical arguments
* for handling by the abstract memory management code.
*/
#define FLT_IFETCH -1
#define FLT_LOAD 0
#define FLT_STORE 1
/*
* Canonical page fault handler
*/
void do_page_fault(unsigned long address, long cause, struct pt_regs *regs)
{
struct vm_area_struct *vma;
struct mm_struct *mm = current->mm;
siginfo_t info;
int si_code = SEGV_MAPERR;
int fault;
const struct exception_table_entry *fixup;
/*
* If we're in an interrupt or have no user context,
* then we must not take the fault.
*/
if (unlikely(in_interrupt() || !mm))
goto no_context;
local_irq_enable();
down_read(&mm->mmap_sem);
vma = find_vma(mm, address);
if (!vma)
goto bad_area;
if (vma->vm_start <= address)
goto good_area;
if (!(vma->vm_flags & VM_GROWSDOWN))
goto bad_area;
if (expand_stack(vma, address))
goto bad_area;
good_area:
/* Address space is OK. Now check access rights. */
si_code = SEGV_ACCERR;
switch (cause) {
case FLT_IFETCH:
if (!(vma->vm_flags & VM_EXEC))
goto bad_area;
break;
case FLT_LOAD:
if (!(vma->vm_flags & VM_READ))
goto bad_area;
break;
case FLT_STORE:
if (!(vma->vm_flags & VM_WRITE))
goto bad_area;
break;
}
fault = handle_mm_fault(mm, vma, address, (cause > 0));
/* The most common case -- we are done. */
if (likely(!(fault & VM_FAULT_ERROR))) {
if (fault & VM_FAULT_MAJOR)
current->maj_flt++;
else
current->min_flt++;
up_read(&mm->mmap_sem);
return;
}
up_read(&mm->mmap_sem);
/* Handle copyin/out exception cases */
if (!user_mode(regs))
goto no_context;
if (fault & VM_FAULT_OOM) {
pagefault_out_of_memory();
return;
}
/* User-mode address is in the memory map, but we are
* unable to fix up the page fault.
*/
if (fault & VM_FAULT_SIGBUS) {
info.si_signo = SIGBUS;
info.si_code = BUS_ADRERR;
}
/* Address is not in the memory map */
else {
info.si_signo = SIGSEGV;
info.si_code = SEGV_ACCERR;
}
info.si_errno = 0;
info.si_addr = (void __user *)address;
force_sig_info(info.si_signo, &info, current);
return;
bad_area:
up_read(&mm->mmap_sem);
if (user_mode(regs)) {
info.si_signo = SIGSEGV;
info.si_errno = 0;
info.si_code = si_code;
info.si_addr = (void __user *)address;
force_sig_info(SIGSEGV, &info, current);
return;
}
/* Kernel-mode fault falls through */
no_context:
fixup = search_exception_tables(pt_elr(regs));
if (fixup) {
pt_set_elr(regs, fixup->fixup);
return;
}
/* Things are looking very, very bad now */
bust_spinlocks(1);
printk(KERN_EMERG "Unable to handle kernel paging request at "
"virtual address 0x%08lx, regs %p\n", address, regs);
die("Bad Kernel VA", regs, SIGKILL);
}
void read_protection_fault(struct pt_regs *regs)
{
unsigned long badvadr = pt_badva(regs);
do_page_fault(badvadr, FLT_LOAD, regs);
}
void write_protection_fault(struct pt_regs *regs)
{
unsigned long badvadr = pt_badva(regs);
do_page_fault(badvadr, FLT_STORE, regs);
}
void execute_protection_fault(struct pt_regs *regs)
{
unsigned long badvadr = pt_badva(regs);
do_page_fault(badvadr, FLT_IFETCH, regs);
}
/*
* Hexagon Virtual Machine TLB functions
*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
/*
* The Hexagon Virtual Machine conceals the real workings of
* the TLB, but there are one or two functions that need to
* be instantiated for it, differently from a native build.
*/
#include <linux/mm.h>
#include <asm/page.h>
#include <asm/hexagon_vm.h>
/*
* Initial VM implementation has only one map active at a time, with
* TLB purges on changes. So either we're nuking the current map,
* or it's a no-op. This operation is messy on true SMPs where other
* processors must be induced to flush the copies in their local TLBs,
* but Hexagon thread-based virtual processors share the same MMU.
*/
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end)
{
struct mm_struct *mm = vma->vm_mm;
if (mm->context.ptbase == current->active_mm->context.ptbase)
__vmclrmap((void *)start, end - start);
}
/*
* Flush a page from the kernel virtual map - used by highmem
*/
void flush_tlb_one(unsigned long vaddr)
{
__vmclrmap((void *)vaddr, PAGE_SIZE);
}
/*
* Flush all TLBs across all CPUs, virtual or real.
* A single Hexagon core has 6 thread contexts but
* only one TLB.
*/
void tlb_flush_all(void)
{
/* should probably use that fixaddr end or whatever label */
__vmclrmap(0, 0xffff0000);
}
/*
* Flush TLB entries associated with a given mm_struct mapping.
*/
void flush_tlb_mm(struct mm_struct *mm)
{
/* Current Virtual Machine has only one map active at a time */
if (current->active_mm->context.ptbase == mm->context.ptbase)
tlb_flush_all();
}
/*
* Flush TLB state associated with a page of a vma.
*/
void flush_tlb_page(struct vm_area_struct *vma, unsigned long vaddr)
{
struct mm_struct *mm = vma->vm_mm;
if (mm->context.ptbase == current->active_mm->context.ptbase)
__vmclrmap((void *)vaddr, PAGE_SIZE);
}
/*
* Flush TLB entries associated with a kernel address range.
* Like flush range, but without the check on the vma->vm_mm.
*/
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
__vmclrmap((void *)start, end - start);
}
@@ -33,8 +33,10 @@ extern __wsum csum_partial_copy(const void *src, void *dst, int len, __wsum sum)
extern __wsum csum_partial_copy_from_user(const void __user *src, void *dst,
int len, __wsum sum, int *csum_err);
#ifndef csum_partial_copy_nocheck
#define csum_partial_copy_nocheck(src, dst, len, sum) \
csum_partial_copy((src), (dst), (len), (sum))
#endif
/*
* This is a version of ip_compute_csum() optimized for IP headers,
@@ -63,12 +65,14 @@ csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
unsigned short proto, __wsum sum);
#endif
#ifndef csum_tcpudp_magic
static inline __sum16
csum_tcpudp_magic(__be32 saddr, __be32 daddr, unsigned short len,
unsigned short proto, __wsum sum)
{
return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
}
#endif
/*
* this routine is used for miscellaneous IP-like checksums, mainly
...
#ifndef _ASM_POWERPC_RWSEM_H
#define _ASM_POWERPC_RWSEM_H
#ifndef _LINUX_RWSEM_H
#error "Please don't include <asm/rwsem.h> directly, use <linux/rwsem.h> instead."
#endif
#ifdef __KERNEL__
/*
* R/W semaphores for PPC using the stuff in lib/rwsem.c.
* Adapted largely from include/asm-i386/rwsem.h
* by Paul Mackerras <paulus@samba.org>.
*/
/*
* the semaphore definition
*/
#ifdef CONFIG_PPC64
# define RWSEM_ACTIVE_MASK 0xffffffffL
#else
# define RWSEM_ACTIVE_MASK 0x0000ffffL
#endif
#define RWSEM_UNLOCKED_VALUE 0x00000000L
#define RWSEM_ACTIVE_BIAS 0x00000001L
#define RWSEM_WAITING_BIAS (-RWSEM_ACTIVE_MASK-1)
#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
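/*
 * Worked example (32-bit, RWSEM_ACTIVE_MASK 0xffff): an unlocked
 * count is 0x00000000; one active reader makes it 0x00000001; an
 * active writer adds RWSEM_ACTIVE_WRITE_BIAS, giving 0xffff0001
 * (i.e. -0x10000 + 1), so a negative count indicates a writer or
 * waiting tasks.
 */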
/*
* lock for reading
*/
static inline void __down_read(struct rw_semaphore *sem)
{
if (unlikely(atomic_long_inc_return((atomic_long_t *)&sem->count) <= 0))
rwsem_down_read_failed(sem);
}
static inline int __down_read_trylock(struct rw_semaphore *sem)
{
long tmp;
while ((tmp = sem->count) >= 0) {
if (tmp == cmpxchg(&sem->count, tmp,
tmp + RWSEM_ACTIVE_READ_BIAS)) {
return 1;
}
}
return 0;
}
/*
* lock for writing
*/
static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
{
long tmp;
tmp = atomic_long_add_return(RWSEM_ACTIVE_WRITE_BIAS,
(atomic_long_t *)&sem->count);
if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
rwsem_down_write_failed(sem);
}
static inline void __down_write(struct rw_semaphore *sem)
{
__down_write_nested(sem, 0);
}
static inline int __down_write_trylock(struct rw_semaphore *sem)
{
long tmp;
tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
RWSEM_ACTIVE_WRITE_BIAS);
return tmp == RWSEM_UNLOCKED_VALUE;
}
/*
* unlock after reading
*/
static inline void __up_read(struct rw_semaphore *sem)
{
long tmp;
tmp = atomic_long_dec_return((atomic_long_t *)&sem->count);
if (unlikely(tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0))
rwsem_wake(sem);
}
/*
* unlock after writing
*/
static inline void __up_write(struct rw_semaphore *sem)
{
if (unlikely(atomic_long_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
(atomic_long_t *)&sem->count) < 0))
rwsem_wake(sem);
}
/*
* implement atomic add functionality
*/
static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
{
atomic_long_add(delta, (atomic_long_t *)&sem->count);
}
/*
* downgrade write lock to read lock
*/
static inline void __downgrade_write(struct rw_semaphore *sem)
{
long tmp;
tmp = atomic_long_add_return(-RWSEM_WAITING_BIAS,
(atomic_long_t *)&sem->count);
if (tmp < 0)
rwsem_downgrade_wake(sem);
}
/*
* implement exchange and add functionality
*/
static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
{
return atomic_long_add_return(delta, (atomic_long_t *)&sem->count);
}
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_RWSEM_H */