Commit 02a99ed6 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.monstr.eu/linux-2.6-microblaze

* 'for-linus' of git://git.monstr.eu/linux-2.6-microblaze: (55 commits)
  microblaze: Don't use access_ok for unaligned
  microblaze: remove unused flat_stack_align() definition
  microblaze: Fix problem with early_printk in startup
  microblaze_mmu_v2: Makefiles
  microblaze_mmu_v2: Kconfig update
  microblaze_mmu_v2: stat.h MMU update
  microblaze_mmu_v2: Elf update
  microblaze_mmu_v2: Update dma.h for MMU
  microblaze_mmu_v2: Update cacheflush.h
  microblaze_mmu_v2: Update signal returning address
  microblaze_mmu_v2: Traps MMU update
  microblaze_mmu_v2: Enable fork syscall for MMU and add fork as vfork for noMMU
  microblaze_mmu_v2: Update linker script for MMU
  microblaze_mmu_v2: Add MMU related exceptions handling
  microblaze_mmu_v2: uaccess MMU update
  microblaze_mmu_v2: Update exception handling - MMU exception
  microblaze_mmu_v2: entry.S, entry.h
  microblaze_mmu_v2: Add CURRENT_TASK for entry.S
  microblaze_mmu_v2: MMU asm offset update
  microblaze_mmu_v2: Update tlb.h and tlbflush.h
  ...
parents 2b10dc45 3447ef29
@@ -6,6 +6,7 @@ mainmenu "Linux/Microblaze Kernel Configuration"
 config MICROBLAZE
 	def_bool y
 	select HAVE_LMB
+	select ARCH_WANT_OPTIONAL_GPIOLIB

 config SWAP
 	def_bool n
@@ -49,13 +50,14 @@ config GENERIC_CLOCKEVENTS
 config GENERIC_HARDIRQS_NO__DO_IRQ
 	def_bool y

+config GENERIC_GPIO
+	def_bool y
+
 config PCI
-	depends on !MMU
 	def_bool n

 config NO_DMA
-	depends on !MMU
-	def_bool n
+	def_bool y

 source "init/Kconfig"
@@ -72,7 +74,8 @@ source "kernel/Kconfig.preempt"
 source "kernel/Kconfig.hz"

 config MMU
-	def_bool n
+	bool "MMU support"
+	default n

 config NO_MMU
 	bool
@@ -105,9 +108,6 @@ config CMDLINE_FORCE
 config OF
 	def_bool y

-config OF_DEVICE
-	def_bool y
-
 config PROC_DEVICETREE
 	bool "Support for device tree in /proc"
 	depends on PROC_FS
@@ -118,6 +118,113 @@ config PROC_DEVICETREE
 endmenu
menu "Advanced setup"
config ADVANCED_OPTIONS
bool "Prompt for advanced kernel configuration options"
depends on MMU
help
This option will enable prompting for a variety of advanced kernel
configuration options. These options can cause the kernel to not
work if they are set incorrectly, but can be used to optimize certain
aspects of kernel memory management.
Unless you know what you are doing, say N here.
comment "Default settings for advanced configuration options are used"
depends on !ADVANCED_OPTIONS
config HIGHMEM_START_BOOL
bool "Set high memory pool address"
depends on ADVANCED_OPTIONS && HIGHMEM
help
This option allows you to set the base address of the kernel virtual
area used to map high memory pages. This can be useful in
optimizing the layout of kernel virtual memory.
Say N here unless you know what you are doing.
config HIGHMEM_START
hex "Virtual start address of high memory pool" if HIGHMEM_START_BOOL
depends on MMU
default "0xfe000000"
config LOWMEM_SIZE_BOOL
bool "Set maximum low memory"
depends on ADVANCED_OPTIONS
help
This option allows you to set the maximum amount of memory which
will be used as "low memory", that is, memory which the kernel can
access directly, without having to set up a kernel virtual mapping.
This can be useful in optimizing the layout of kernel virtual
memory.
Say N here unless you know what you are doing.
config LOWMEM_SIZE
hex "Maximum low memory size (in bytes)" if LOWMEM_SIZE_BOOL
depends on MMU
default "0x30000000"
config KERNEL_START_BOOL
bool "Set custom kernel base address"
depends on ADVANCED_OPTIONS
help
This option allows you to set the kernel virtual address at which
the kernel will map low memory (the kernel image will be linked at
this address). This can be useful in optimizing the virtual memory
layout of the system.
Say N here unless you know what you are doing.
config KERNEL_START
hex "Virtual address of kernel base" if KERNEL_START_BOOL
default "0xc0000000" if MMU
default KERNEL_BASE_ADDR if !MMU
config TASK_SIZE_BOOL
bool "Set custom user task size"
depends on ADVANCED_OPTIONS
help
This option allows you to set the amount of virtual address space
allocated to user tasks. This can be useful in optimizing the
virtual memory layout of the system.
Say N here unless you know what you are doing.
config TASK_SIZE
hex "Size of user task space" if TASK_SIZE_BOOL
depends on MMU
default "0x80000000"
config CONSISTENT_START_BOOL
bool "Set custom consistent memory pool address"
depends on ADVANCED_OPTIONS && NOT_COHERENT_CACHE
help
This option allows you to set the base virtual address
of the the consistent memory pool. This pool of virtual
memory is used to make consistent memory allocations.
config CONSISTENT_START
hex "Base virtual address of consistent memory pool" if CONSISTENT_START_BOOL
depends on MMU
default "0xff100000" if NOT_COHERENT_CACHE
config CONSISTENT_SIZE_BOOL
bool "Set custom consistent memory pool size"
depends on ADVANCED_OPTIONS && NOT_COHERENT_CACHE
help
This option allows you to set the size of the the
consistent memory pool. This pool of virtual memory
is used to make consistent memory allocations.
config CONSISTENT_SIZE
hex "Size of consistent memory pool" if CONSISTENT_SIZE_BOOL
depends on MMU
default "0x00200000" if NOT_COHERENT_CACHE
endmenu
source "mm/Kconfig" source "mm/Kconfig"
menu "Exectuable file formats" menu "Exectuable file formats"
...
+ifeq ($(CONFIG_MMU),y)
+UTS_SYSNAME = -DUTS_SYSNAME=\"Linux\"
+else
 UTS_SYSNAME = -DUTS_SYSNAME=\"uClinux\"
+endif
 # What CPU vesion are we building for, and crack it open
 # as major.minor.rev
@@ -36,6 +40,8 @@ CPUFLAGS-1 += $(call cc-option,-mcpu=v$(CPU_VER))
 # r31 holds current when in kernel mode
 CFLAGS_KERNEL += -ffixed-r31 $(CPUFLAGS-1) $(CPUFLAGS-2)

+LDFLAGS :=
+LDFLAGS_vmlinux :=
 LDFLAGS_BLOB := --format binary --oformat elf32-microblaze

 LIBGCC := $(shell $(CC) $(CFLAGS_KERNEL) -print-libgcc-file-name)
...
@@ -7,6 +7,8 @@ targets := linux.bin linux.bin.gz
 OBJCOPYFLAGS_linux.bin := -O binary

 $(obj)/linux.bin: vmlinux FORCE
+	[ -n $(CONFIG_INITRAMFS_SOURCE) ] && [ ! -e $(CONFIG_INITRAMFS_SOURCE) ] && \
+	touch $(CONFIG_INITRAMFS_SOURCE) || echo "No CPIO image"
	$(call if_changed,objcopy)
	@echo 'Kernel: $@ is ready' ' (#'`cat .version`')'
...
#
# Automatically generated make config: don't edit
# Linux kernel version: 2.6.30-rc6
# Fri May 22 10:02:33 2009
#
CONFIG_MICROBLAZE=y
# CONFIG_SWAP is not set
CONFIG_RWSEM_GENERIC_SPINLOCK=y
# CONFIG_ARCH_HAS_ILOG2_U32 is not set
# CONFIG_ARCH_HAS_ILOG2_U64 is not set
CONFIG_GENERIC_FIND_NEXT_BIT=y
CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_IRQ_PROBE=y
CONFIG_GENERIC_CALIBRATE_DELAY=y
CONFIG_GENERIC_TIME=y
# CONFIG_GENERIC_TIME_VSYSCALL is not set
CONFIG_GENERIC_CLOCKEVENTS=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
CONFIG_GENERIC_GPIO=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
#
# General setup
#
CONFIG_EXPERIMENTAL=y
CONFIG_BROKEN_ON_SMP=y
CONFIG_INIT_ENV_ARG_LIMIT=32
CONFIG_LOCALVERSION=""
CONFIG_LOCALVERSION_AUTO=y
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
# CONFIG_POSIX_MQUEUE is not set
# CONFIG_BSD_PROCESS_ACCT is not set
# CONFIG_TASKSTATS is not set
# CONFIG_AUDIT is not set
#
# RCU Subsystem
#
CONFIG_CLASSIC_RCU=y
# CONFIG_TREE_RCU is not set
# CONFIG_PREEMPT_RCU is not set
# CONFIG_TREE_RCU_TRACE is not set
# CONFIG_PREEMPT_RCU_TRACE is not set
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=17
# CONFIG_GROUP_SCHED is not set
# CONFIG_CGROUPS is not set
CONFIG_SYSFS_DEPRECATED=y
CONFIG_SYSFS_DEPRECATED_V2=y
# CONFIG_RELAY is not set
# CONFIG_NAMESPACES is not set
CONFIG_BLK_DEV_INITRD=y
CONFIG_INITRAMFS_SOURCE="rootfs.cpio"
CONFIG_INITRAMFS_ROOT_UID=0
CONFIG_INITRAMFS_ROOT_GID=0
CONFIG_RD_GZIP=y
# CONFIG_RD_BZIP2 is not set
# CONFIG_RD_LZMA is not set
CONFIG_INITRAMFS_COMPRESSION_NONE=y
# CONFIG_INITRAMFS_COMPRESSION_GZIP is not set
# CONFIG_INITRAMFS_COMPRESSION_BZIP2 is not set
# CONFIG_INITRAMFS_COMPRESSION_LZMA is not set
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_SYSCTL=y
CONFIG_ANON_INODES=y
CONFIG_EMBEDDED=y
CONFIG_SYSCTL_SYSCALL=y
CONFIG_KALLSYMS=y
CONFIG_KALLSYMS_ALL=y
CONFIG_KALLSYMS_EXTRA_PASS=y
# CONFIG_STRIP_ASM_SYMS is not set
# CONFIG_HOTPLUG is not set
CONFIG_PRINTK=y
CONFIG_BUG=y
CONFIG_ELF_CORE=y
# CONFIG_BASE_FULL is not set
# CONFIG_FUTEX is not set
# CONFIG_EPOLL is not set
# CONFIG_SIGNALFD is not set
CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
# CONFIG_SHMEM is not set
CONFIG_AIO=y
CONFIG_VM_EVENT_COUNTERS=y
CONFIG_COMPAT_BRK=y
CONFIG_SLAB=y
# CONFIG_SLUB is not set
# CONFIG_SLOB is not set
# CONFIG_PROFILING is not set
# CONFIG_MARKERS is not set
# CONFIG_SLOW_WORK is not set
# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
CONFIG_SLABINFO=y
CONFIG_BASE_SMALL=1
CONFIG_MODULES=y
# CONFIG_MODULE_FORCE_LOAD is not set
CONFIG_MODULE_UNLOAD=y
# CONFIG_MODULE_FORCE_UNLOAD is not set
# CONFIG_MODVERSIONS is not set
# CONFIG_MODULE_SRCVERSION_ALL is not set
CONFIG_BLOCK=y
# CONFIG_LBD is not set
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_BLK_DEV_INTEGRITY is not set
#
# IO Schedulers
#
CONFIG_IOSCHED_NOOP=y
CONFIG_IOSCHED_AS=y
CONFIG_IOSCHED_DEADLINE=y
CONFIG_IOSCHED_CFQ=y
# CONFIG_DEFAULT_AS is not set
# CONFIG_DEFAULT_DEADLINE is not set
CONFIG_DEFAULT_CFQ=y
# CONFIG_DEFAULT_NOOP is not set
CONFIG_DEFAULT_IOSCHED="cfq"
# CONFIG_FREEZER is not set
#
# Platform options
#
CONFIG_PLATFORM_GENERIC=y
CONFIG_OPT_LIB_FUNCTION=y
CONFIG_OPT_LIB_ASM=y
CONFIG_ALLOW_EDIT_AUTO=y
#
# Automatic platform settings from Kconfig.auto
#
#
# Definitions for MICROBLAZE0
#
CONFIG_KERNEL_BASE_ADDR=0x90000000
CONFIG_XILINX_MICROBLAZE0_FAMILY="virtex5"
CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR=1
CONFIG_XILINX_MICROBLAZE0_USE_PCMP_INSTR=1
CONFIG_XILINX_MICROBLAZE0_USE_BARREL=1
CONFIG_XILINX_MICROBLAZE0_USE_DIV=1
CONFIG_XILINX_MICROBLAZE0_USE_HW_MUL=2
CONFIG_XILINX_MICROBLAZE0_USE_FPU=2
CONFIG_XILINX_MICROBLAZE0_HW_VER="7.10.d"
#
# Processor type and features
#
# CONFIG_NO_HZ is not set
# CONFIG_HIGH_RES_TIMERS is not set
CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
CONFIG_PREEMPT_NONE=y
# CONFIG_PREEMPT_VOLUNTARY is not set
# CONFIG_PREEMPT is not set
CONFIG_HZ_100=y
# CONFIG_HZ_250 is not set
# CONFIG_HZ_300 is not set
# CONFIG_HZ_1000 is not set
CONFIG_HZ=100
# CONFIG_SCHED_HRTICK is not set
CONFIG_MMU=y
#
# Boot options
#
CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE="console=ttyUL0,115200"
CONFIG_CMDLINE_FORCE=y
CONFIG_OF=y
CONFIG_PROC_DEVICETREE=y
#
# Advanced setup
#
# CONFIG_ADVANCED_OPTIONS is not set
#
# Default settings for advanced configuration options are used
#
CONFIG_HIGHMEM_START=0xfe000000
CONFIG_LOWMEM_SIZE=0x30000000
CONFIG_KERNEL_START=0xc0000000
CONFIG_TASK_SIZE=0x80000000
CONFIG_SELECT_MEMORY_MODEL=y
CONFIG_FLATMEM_MANUAL=y
# CONFIG_DISCONTIGMEM_MANUAL is not set
# CONFIG_SPARSEMEM_MANUAL is not set
CONFIG_FLATMEM=y
CONFIG_FLAT_NODE_MEM_MAP=y
CONFIG_PAGEFLAGS_EXTENDED=y
CONFIG_SPLIT_PTLOCK_CPUS=4
# CONFIG_PHYS_ADDR_T_64BIT is not set
CONFIG_ZONE_DMA_FLAG=0
CONFIG_VIRT_TO_BUS=y
CONFIG_UNEVICTABLE_LRU=y
CONFIG_HAVE_MLOCK=y
CONFIG_HAVE_MLOCKED_PAGE_BIT=y
#
# Exectuable file formats
#
CONFIG_BINFMT_ELF=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
# CONFIG_HAVE_AOUT is not set
# CONFIG_BINFMT_MISC is not set
CONFIG_NET=y
#
# Networking options
#
CONFIG_PACKET=y
# CONFIG_PACKET_MMAP is not set
CONFIG_UNIX=y
CONFIG_XFRM=y
# CONFIG_XFRM_USER is not set
# CONFIG_XFRM_SUB_POLICY is not set
# CONFIG_XFRM_MIGRATE is not set
# CONFIG_XFRM_STATISTICS is not set
# CONFIG_NET_KEY is not set
CONFIG_INET=y
# CONFIG_IP_MULTICAST is not set
# CONFIG_IP_ADVANCED_ROUTER is not set
CONFIG_IP_FIB_HASH=y
# CONFIG_IP_PNP is not set
# CONFIG_NET_IPIP is not set
# CONFIG_NET_IPGRE is not set
# CONFIG_ARPD is not set
# CONFIG_SYN_COOKIES is not set
# CONFIG_INET_AH is not set
# CONFIG_INET_ESP is not set
# CONFIG_INET_IPCOMP is not set
# CONFIG_INET_XFRM_TUNNEL is not set
# CONFIG_INET_TUNNEL is not set
CONFIG_INET_XFRM_MODE_TRANSPORT=y
CONFIG_INET_XFRM_MODE_TUNNEL=y
CONFIG_INET_XFRM_MODE_BEET=y
# CONFIG_INET_LRO is not set
CONFIG_INET_DIAG=y
CONFIG_INET_TCP_DIAG=y
# CONFIG_TCP_CONG_ADVANCED is not set
CONFIG_TCP_CONG_CUBIC=y
CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_TCP_MD5SIG is not set
# CONFIG_IPV6 is not set
# CONFIG_NETWORK_SECMARK is not set
# CONFIG_NETFILTER is not set
# CONFIG_IP_DCCP is not set
# CONFIG_IP_SCTP is not set
# CONFIG_TIPC is not set
# CONFIG_ATM is not set
# CONFIG_BRIDGE is not set
# CONFIG_NET_DSA is not set
# CONFIG_VLAN_8021Q is not set
# CONFIG_DECNET is not set
# CONFIG_LLC2 is not set
# CONFIG_IPX is not set
# CONFIG_ATALK is not set
# CONFIG_X25 is not set
# CONFIG_LAPB is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
# CONFIG_PHONET is not set
# CONFIG_NET_SCHED is not set
# CONFIG_DCB is not set
#
# Network testing
#
# CONFIG_NET_PKTGEN is not set
# CONFIG_HAMRADIO is not set
# CONFIG_CAN is not set
# CONFIG_IRDA is not set
# CONFIG_BT is not set
# CONFIG_AF_RXRPC is not set
# CONFIG_WIRELESS is not set
# CONFIG_WIMAX is not set
# CONFIG_RFKILL is not set
# CONFIG_NET_9P is not set
#
# Device Drivers
#
#
# Generic Driver Options
#
CONFIG_STANDALONE=y
CONFIG_PREVENT_FIRMWARE_BUILD=y
# CONFIG_DEBUG_DRIVER is not set
# CONFIG_DEBUG_DEVRES is not set
# CONFIG_SYS_HYPERVISOR is not set
# CONFIG_CONNECTOR is not set
# CONFIG_MTD is not set
CONFIG_OF_DEVICE=y
# CONFIG_PARPORT is not set
CONFIG_BLK_DEV=y
# CONFIG_BLK_DEV_COW_COMMON is not set
# CONFIG_BLK_DEV_LOOP is not set
# CONFIG_BLK_DEV_NBD is not set
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_COUNT=16
CONFIG_BLK_DEV_RAM_SIZE=8192
# CONFIG_BLK_DEV_XIP is not set
# CONFIG_CDROM_PKTCDVD is not set
# CONFIG_ATA_OVER_ETH is not set
# CONFIG_XILINX_SYSACE is not set
CONFIG_MISC_DEVICES=y
# CONFIG_ENCLOSURE_SERVICES is not set
# CONFIG_C2PORT is not set
#
# EEPROM support
#
# CONFIG_EEPROM_93CX6 is not set
#
# SCSI device support
#
# CONFIG_RAID_ATTRS is not set
# CONFIG_SCSI is not set
# CONFIG_SCSI_DMA is not set
# CONFIG_SCSI_NETLINK is not set
# CONFIG_ATA is not set
# CONFIG_MD is not set
CONFIG_NETDEVICES=y
CONFIG_COMPAT_NET_DEV_OPS=y
# CONFIG_DUMMY is not set
# CONFIG_BONDING is not set
# CONFIG_MACVLAN is not set
# CONFIG_EQUALIZER is not set
# CONFIG_TUN is not set
# CONFIG_VETH is not set
# CONFIG_PHYLIB is not set
CONFIG_NET_ETHERNET=y
# CONFIG_MII is not set
# CONFIG_ETHOC is not set
# CONFIG_DNET is not set
# CONFIG_IBM_NEW_EMAC_ZMII is not set
# CONFIG_IBM_NEW_EMAC_RGMII is not set
# CONFIG_IBM_NEW_EMAC_TAH is not set
# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
# CONFIG_B44 is not set
CONFIG_NETDEV_1000=y
CONFIG_NETDEV_10000=y
#
# Wireless LAN
#
# CONFIG_WLAN_PRE80211 is not set
# CONFIG_WLAN_80211 is not set
#
# Enable WiMAX (Networking options) to see the WiMAX drivers
#
# CONFIG_WAN is not set
# CONFIG_PPP is not set
# CONFIG_SLIP is not set
# CONFIG_NETCONSOLE is not set
# CONFIG_NETPOLL is not set
# CONFIG_NET_POLL_CONTROLLER is not set
# CONFIG_ISDN is not set
# CONFIG_PHONE is not set
#
# Input device support
#
# CONFIG_INPUT is not set
#
# Hardware I/O ports
#
# CONFIG_SERIO is not set
# CONFIG_GAMEPORT is not set
#
# Character devices
#
# CONFIG_VT is not set
CONFIG_DEVKMEM=y
# CONFIG_SERIAL_NONSTANDARD is not set
#
# Serial drivers
#
# CONFIG_SERIAL_8250 is not set
#
# Non-8250 serial port support
#
CONFIG_SERIAL_UARTLITE=y
CONFIG_SERIAL_UARTLITE_CONSOLE=y
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
CONFIG_UNIX98_PTYS=y
# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
CONFIG_LEGACY_PTYS=y
CONFIG_LEGACY_PTY_COUNT=256
# CONFIG_IPMI_HANDLER is not set
# CONFIG_HW_RANDOM is not set
# CONFIG_RTC is not set
# CONFIG_GEN_RTC is not set
# CONFIG_XILINX_HWICAP is not set
# CONFIG_R3964 is not set
# CONFIG_RAW_DRIVER is not set
# CONFIG_TCG_TPM is not set
# CONFIG_I2C is not set
# CONFIG_SPI is not set
CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
# CONFIG_GPIOLIB is not set
# CONFIG_W1 is not set
# CONFIG_POWER_SUPPLY is not set
# CONFIG_HWMON is not set
# CONFIG_THERMAL is not set
# CONFIG_THERMAL_HWMON is not set
# CONFIG_WATCHDOG is not set
CONFIG_SSB_POSSIBLE=y
#
# Sonics Silicon Backplane
#
# CONFIG_SSB is not set
#
# Multifunction device drivers
#
# CONFIG_MFD_CORE is not set
# CONFIG_MFD_SM501 is not set
# CONFIG_HTC_PASIC3 is not set
# CONFIG_MFD_TMIO is not set
# CONFIG_REGULATOR is not set
#
# Multimedia devices
#
#
# Multimedia core support
#
# CONFIG_VIDEO_DEV is not set
# CONFIG_DVB_CORE is not set
# CONFIG_VIDEO_MEDIA is not set
#
# Multimedia drivers
#
# CONFIG_DAB is not set
#
# Graphics support
#
# CONFIG_VGASTATE is not set
# CONFIG_VIDEO_OUTPUT_CONTROL is not set
# CONFIG_FB is not set
# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
#
# Display device support
#
# CONFIG_DISPLAY_SUPPORT is not set
# CONFIG_SOUND is not set
# CONFIG_USB_SUPPORT is not set
# CONFIG_MMC is not set
# CONFIG_MEMSTICK is not set
# CONFIG_NEW_LEDS is not set
# CONFIG_ACCESSIBILITY is not set
# CONFIG_RTC_CLASS is not set
# CONFIG_DMADEVICES is not set
# CONFIG_AUXDISPLAY is not set
# CONFIG_UIO is not set
# CONFIG_STAGING is not set
#
# File systems
#
CONFIG_EXT2_FS=y
# CONFIG_EXT2_FS_XATTR is not set
# CONFIG_EXT2_FS_XIP is not set
# CONFIG_EXT3_FS is not set
# CONFIG_EXT4_FS is not set
# CONFIG_REISERFS_FS is not set
# CONFIG_JFS_FS is not set
# CONFIG_FS_POSIX_ACL is not set
CONFIG_FILE_LOCKING=y
# CONFIG_XFS_FS is not set
# CONFIG_OCFS2_FS is not set
# CONFIG_BTRFS_FS is not set
# CONFIG_DNOTIFY is not set
# CONFIG_INOTIFY is not set
# CONFIG_QUOTA is not set
# CONFIG_AUTOFS_FS is not set
# CONFIG_AUTOFS4_FS is not set
# CONFIG_FUSE_FS is not set
#
# Caches
#
# CONFIG_FSCACHE is not set
#
# CD-ROM/DVD Filesystems
#
# CONFIG_ISO9660_FS is not set
# CONFIG_UDF_FS is not set
#
# DOS/FAT/NT Filesystems
#
# CONFIG_MSDOS_FS is not set
# CONFIG_VFAT_FS is not set
# CONFIG_NTFS_FS is not set
#
# Pseudo filesystems
#
CONFIG_PROC_FS=y
# CONFIG_PROC_KCORE is not set
CONFIG_PROC_SYSCTL=y
CONFIG_PROC_PAGE_MONITOR=y
CONFIG_SYSFS=y
CONFIG_TMPFS=y
# CONFIG_TMPFS_POSIX_ACL is not set
# CONFIG_HUGETLB_PAGE is not set
# CONFIG_CONFIGFS_FS is not set
CONFIG_MISC_FILESYSTEMS=y
# CONFIG_ADFS_FS is not set
# CONFIG_AFFS_FS is not set
# CONFIG_HFS_FS is not set
# CONFIG_HFSPLUS_FS is not set
# CONFIG_BEFS_FS is not set
# CONFIG_BFS_FS is not set
# CONFIG_EFS_FS is not set
# CONFIG_CRAMFS is not set
# CONFIG_SQUASHFS is not set
# CONFIG_VXFS_FS is not set
# CONFIG_MINIX_FS is not set
# CONFIG_OMFS_FS is not set
# CONFIG_HPFS_FS is not set
# CONFIG_QNX4FS_FS is not set
# CONFIG_ROMFS_FS is not set
# CONFIG_SYSV_FS is not set
# CONFIG_UFS_FS is not set
# CONFIG_NILFS2_FS is not set
CONFIG_NETWORK_FILESYSTEMS=y
CONFIG_NFS_FS=y
CONFIG_NFS_V3=y
# CONFIG_NFS_V3_ACL is not set
# CONFIG_NFS_V4 is not set
# CONFIG_NFSD is not set
CONFIG_LOCKD=y
CONFIG_LOCKD_V4=y
CONFIG_NFS_COMMON=y
CONFIG_SUNRPC=y
# CONFIG_RPCSEC_GSS_KRB5 is not set
# CONFIG_RPCSEC_GSS_SPKM3 is not set
# CONFIG_SMB_FS is not set
CONFIG_CIFS=y
CONFIG_CIFS_STATS=y
CONFIG_CIFS_STATS2=y
# CONFIG_CIFS_WEAK_PW_HASH is not set
# CONFIG_CIFS_XATTR is not set
# CONFIG_CIFS_DEBUG2 is not set
# CONFIG_CIFS_EXPERIMENTAL is not set
# CONFIG_NCP_FS is not set
# CONFIG_CODA_FS is not set
# CONFIG_AFS_FS is not set
#
# Partition Types
#
CONFIG_PARTITION_ADVANCED=y
# CONFIG_ACORN_PARTITION is not set
# CONFIG_OSF_PARTITION is not set
# CONFIG_AMIGA_PARTITION is not set
# CONFIG_ATARI_PARTITION is not set
# CONFIG_MAC_PARTITION is not set
CONFIG_MSDOS_PARTITION=y
# CONFIG_BSD_DISKLABEL is not set
# CONFIG_MINIX_SUBPARTITION is not set
# CONFIG_SOLARIS_X86_PARTITION is not set
# CONFIG_UNIXWARE_DISKLABEL is not set
# CONFIG_LDM_PARTITION is not set
# CONFIG_SGI_PARTITION is not set
# CONFIG_ULTRIX_PARTITION is not set
# CONFIG_SUN_PARTITION is not set
# CONFIG_KARMA_PARTITION is not set
# CONFIG_EFI_PARTITION is not set
# CONFIG_SYSV68_PARTITION is not set
CONFIG_NLS=y
CONFIG_NLS_DEFAULT="iso8859-1"
# CONFIG_NLS_CODEPAGE_437 is not set
# CONFIG_NLS_CODEPAGE_737 is not set
# CONFIG_NLS_CODEPAGE_775 is not set
# CONFIG_NLS_CODEPAGE_850 is not set
# CONFIG_NLS_CODEPAGE_852 is not set
# CONFIG_NLS_CODEPAGE_855 is not set
# CONFIG_NLS_CODEPAGE_857 is not set
# CONFIG_NLS_CODEPAGE_860 is not set
# CONFIG_NLS_CODEPAGE_861 is not set
# CONFIG_NLS_CODEPAGE_862 is not set
# CONFIG_NLS_CODEPAGE_863 is not set
# CONFIG_NLS_CODEPAGE_864 is not set
# CONFIG_NLS_CODEPAGE_865 is not set
# CONFIG_NLS_CODEPAGE_866 is not set
# CONFIG_NLS_CODEPAGE_869 is not set
# CONFIG_NLS_CODEPAGE_936 is not set
# CONFIG_NLS_CODEPAGE_950 is not set
# CONFIG_NLS_CODEPAGE_932 is not set
# CONFIG_NLS_CODEPAGE_949 is not set
# CONFIG_NLS_CODEPAGE_874 is not set
# CONFIG_NLS_ISO8859_8 is not set
# CONFIG_NLS_CODEPAGE_1250 is not set
# CONFIG_NLS_CODEPAGE_1251 is not set
# CONFIG_NLS_ASCII is not set
# CONFIG_NLS_ISO8859_1 is not set
# CONFIG_NLS_ISO8859_2 is not set
# CONFIG_NLS_ISO8859_3 is not set
# CONFIG_NLS_ISO8859_4 is not set
# CONFIG_NLS_ISO8859_5 is not set
# CONFIG_NLS_ISO8859_6 is not set
# CONFIG_NLS_ISO8859_7 is not set
# CONFIG_NLS_ISO8859_9 is not set
# CONFIG_NLS_ISO8859_13 is not set
# CONFIG_NLS_ISO8859_14 is not set
# CONFIG_NLS_ISO8859_15 is not set
# CONFIG_NLS_KOI8_R is not set
# CONFIG_NLS_KOI8_U is not set
# CONFIG_NLS_UTF8 is not set
# CONFIG_DLM is not set
#
# Kernel hacking
#
# CONFIG_PRINTK_TIME is not set
CONFIG_ENABLE_WARN_DEPRECATED=y
CONFIG_ENABLE_MUST_CHECK=y
CONFIG_FRAME_WARN=1024
# CONFIG_MAGIC_SYSRQ is not set
# CONFIG_UNUSED_SYMBOLS is not set
# CONFIG_DEBUG_FS is not set
# CONFIG_HEADERS_CHECK is not set
CONFIG_DEBUG_KERNEL=y
# CONFIG_DEBUG_SHIRQ is not set
CONFIG_DETECT_SOFTLOCKUP=y
# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
CONFIG_DETECT_HUNG_TASK=y
# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
CONFIG_SCHED_DEBUG=y
# CONFIG_SCHEDSTATS is not set
# CONFIG_TIMER_STATS is not set
# CONFIG_DEBUG_OBJECTS is not set
CONFIG_DEBUG_SLAB=y
# CONFIG_DEBUG_SLAB_LEAK is not set
CONFIG_DEBUG_SPINLOCK=y
# CONFIG_DEBUG_MUTEXES is not set
# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
# CONFIG_DEBUG_KOBJECT is not set
CONFIG_DEBUG_INFO=y
# CONFIG_DEBUG_VM is not set
# CONFIG_DEBUG_WRITECOUNT is not set
# CONFIG_DEBUG_MEMORY_INIT is not set
# CONFIG_DEBUG_LIST is not set
# CONFIG_DEBUG_SG is not set
# CONFIG_DEBUG_NOTIFIERS is not set
# CONFIG_BOOT_PRINTK_DELAY is not set
# CONFIG_RCU_TORTURE_TEST is not set
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
# CONFIG_BACKTRACE_SELF_TEST is not set
# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
# CONFIG_FAULT_INJECTION is not set
# CONFIG_SYSCTL_SYSCALL_CHECK is not set
# CONFIG_PAGE_POISONING is not set
# CONFIG_SAMPLES is not set
CONFIG_EARLY_PRINTK=y
CONFIG_HEART_BEAT=y
CONFIG_DEBUG_BOOTMEM=y
#
# Security options
#
# CONFIG_KEYS is not set
# CONFIG_SECURITY is not set
# CONFIG_SECURITYFS is not set
# CONFIG_SECURITY_FILE_CAPABILITIES is not set
CONFIG_CRYPTO=y
#
# Crypto core or helper
#
# CONFIG_CRYPTO_FIPS is not set
# CONFIG_CRYPTO_MANAGER is not set
# CONFIG_CRYPTO_MANAGER2 is not set
# CONFIG_CRYPTO_GF128MUL is not set
# CONFIG_CRYPTO_NULL is not set
# CONFIG_CRYPTO_CRYPTD is not set
# CONFIG_CRYPTO_AUTHENC is not set
# CONFIG_CRYPTO_TEST is not set
#
# Authenticated Encryption with Associated Data
#
# CONFIG_CRYPTO_CCM is not set
# CONFIG_CRYPTO_GCM is not set
# CONFIG_CRYPTO_SEQIV is not set
#
# Block modes
#
# CONFIG_CRYPTO_CBC is not set
# CONFIG_CRYPTO_CTR is not set
# CONFIG_CRYPTO_CTS is not set
# CONFIG_CRYPTO_ECB is not set
# CONFIG_CRYPTO_LRW is not set
# CONFIG_CRYPTO_PCBC is not set
# CONFIG_CRYPTO_XTS is not set
#
# Hash modes
#
# CONFIG_CRYPTO_HMAC is not set
# CONFIG_CRYPTO_XCBC is not set
#
# Digest
#
# CONFIG_CRYPTO_CRC32C is not set
# CONFIG_CRYPTO_MD4 is not set
# CONFIG_CRYPTO_MD5 is not set
# CONFIG_CRYPTO_MICHAEL_MIC is not set
# CONFIG_CRYPTO_RMD128 is not set
# CONFIG_CRYPTO_RMD160 is not set
# CONFIG_CRYPTO_RMD256 is not set
# CONFIG_CRYPTO_RMD320 is not set
# CONFIG_CRYPTO_SHA1 is not set
# CONFIG_CRYPTO_SHA256 is not set
# CONFIG_CRYPTO_SHA512 is not set
# CONFIG_CRYPTO_TGR192 is not set
# CONFIG_CRYPTO_WP512 is not set
#
# Ciphers
#
# CONFIG_CRYPTO_AES is not set
# CONFIG_CRYPTO_ANUBIS is not set
# CONFIG_CRYPTO_ARC4 is not set
# CONFIG_CRYPTO_BLOWFISH is not set
# CONFIG_CRYPTO_CAMELLIA is not set
# CONFIG_CRYPTO_CAST5 is not set
# CONFIG_CRYPTO_CAST6 is not set
# CONFIG_CRYPTO_DES is not set
# CONFIG_CRYPTO_FCRYPT is not set
# CONFIG_CRYPTO_KHAZAD is not set
# CONFIG_CRYPTO_SALSA20 is not set
# CONFIG_CRYPTO_SEED is not set
# CONFIG_CRYPTO_SERPENT is not set
# CONFIG_CRYPTO_TEA is not set
# CONFIG_CRYPTO_TWOFISH is not set
#
# Compression
#
# CONFIG_CRYPTO_DEFLATE is not set
# CONFIG_CRYPTO_ZLIB is not set
# CONFIG_CRYPTO_LZO is not set
#
# Random Number Generation
#
# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRYPTO_HW=y
# CONFIG_BINARY_PRINTF is not set
#
# Library routines
#
CONFIG_BITREVERSE=y
CONFIG_GENERIC_FIND_LAST_BIT=y
# CONFIG_CRC_CCITT is not set
# CONFIG_CRC16 is not set
# CONFIG_CRC_T10DIF is not set
# CONFIG_CRC_ITU_T is not set
CONFIG_CRC32=y
# CONFIG_CRC7 is not set
# CONFIG_LIBCRC32C is not set
CONFIG_ZLIB_INFLATE=y
CONFIG_DECOMPRESS_GZIP=y
CONFIG_HAS_IOMEM=y
CONFIG_HAS_IOPORT=y
CONFIG_HAS_DMA=y
CONFIG_HAVE_LMB=y
CONFIG_NLATTR=y
 include include/asm-generic/Kbuild.asm

-header-y += auxvec.h
+header-y += elf.h
header-y += errno.h
header-y += fcntl.h
header-y += ioctl.h
header-y += ioctls.h
header-y += ipcbuf.h
header-y += linkage.h
header-y += msgbuf.h
header-y += poll.h
header-y += resource.h
header-y += sembuf.h
header-y += shmbuf.h
header-y += sigcontext.h
header-y += siginfo.h
header-y += socket.h
header-y += sockios.h
header-y += statfs.h
header-y += stat.h
header-y += termbits.h
header-y += ucontext.h
unifdef-y += cputable.h
unifdef-y += elf.h
unifdef-y += termios.h
/*
- * Copyright (C) 2007 PetaLogix
+ * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2007-2009 PetaLogix
  * Copyright (C) 2007 John Williams <john.williams@petalogix.com>
  * based on v850 version which was
  * Copyright (C) 2001,02,03 NEC Electronics Corporation
@@ -43,6 +44,23 @@
 #define flush_icache_range(start, len)	__invalidate_icache_range(start, len)
 #define flush_icache_page(vma, pg)	do { } while (0)
#ifndef CONFIG_MMU
# define flush_icache_user_range(start, len) do { } while (0)
#else
# define flush_icache_user_range(vma, pg, adr, len) __invalidate_icache_all()
# define flush_page_to_ram(page) do { } while (0)
# define flush_icache() __invalidate_icache_all()
# define flush_cache_sigtramp(vaddr) \
__invalidate_icache_range(vaddr, vaddr + 8)
# define flush_dcache_mmap_lock(mapping) do { } while (0)
# define flush_dcache_mmap_unlock(mapping) do { } while (0)
# define flush_cache_dup_mm(mm) do { } while (0)
#endif
 #define flush_cache_vmap(start, end)	do { } while (0)
 #define flush_cache_vunmap(start, end)	do { } while (0)
...
@@ -51,7 +51,8 @@ extern __wsum csum_partial(const void *buff, int len, __wsum sum);
  * here even more important to align src and dst on a 32-bit (or even
  * better 64-bit) boundary
  */
-extern __wsum csum_partial_copy(const char *src, char *dst, int len, int sum);
+extern __wsum csum_partial_copy(const void *src, void *dst, int len,
+				__wsum sum);

 /*
  * the same as csum_partial_copy, but copies from user space.
@@ -59,8 +60,8 @@ extern __wsum csum_partial_copy(const char *src, char *dst, int len, int sum);
  * here even more important to align src and dst on a 32-bit (or even
  * better 64-bit) boundary
  */
-extern __wsum csum_partial_copy_from_user(const char *src, char *dst,
-				int len, int sum, int *csum_err);
+extern __wsum csum_partial_copy_from_user(const void __user *src, void *dst,
+				int len, __wsum sum, int *csum_err);

 #define csum_partial_copy_nocheck(src, dst, len, sum)	\
 	csum_partial_copy((src), (dst), (len), (sum))
@@ -75,11 +76,12 @@ extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl);
 /*
  * Fold a partial checksum
  */
-static inline __sum16 csum_fold(unsigned int sum)
+static inline __sum16 csum_fold(__wsum csum)
 {
+	u32 sum = (__force u32)csum;
 	sum = (sum & 0xffff) + (sum >> 16);
 	sum = (sum & 0xffff) + (sum >> 16);
-	return ~sum;
+	return (__force __sum16)~sum;
 }

 static inline __sum16
@@ -93,6 +95,6 @@ csum_tcpudp_magic(__be32 saddr, __be32 daddr, unsigned short len,
  * this routine is used for miscellaneous IP-like checksums, mainly
  * in icmp.c
  */
-extern __sum16 ip_compute_csum(const unsigned char *buff, int len);
+extern __sum16 ip_compute_csum(const void *buff, int len);

 #endif /* _ASM_MICROBLAZE_CHECKSUM_H */
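The two-pass fold in csum_fold above is easy to check in isolation: the first addition of the high and low 16-bit halves can itself carry out of bit 16, and the second pass absorbs that carry. A minimal user-space sketch (not part of the patch, plain standard C):

#include <stdio.h>
#include <stdint.h>

static uint16_t fold(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);	/* 0x12345678 -> 0x68ac */
	sum = (sum & 0xffff) + (sum >> 16);	/* absorbs a possible carry */
	return (uint16_t)~sum;
}

int main(void)
{
	printf("%04x\n", fold(0x12345678));	/* prints 9753 */
	return 0;
}

The __force casts in the kernel version exist only to satisfy sparse's __wsum/__sum16 annotations; the arithmetic is identical.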
/*
+ * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2008-2009 PetaLogix
  * Copyright (C) 2006 Atmark Techno, Inc.
  *
  * This file is subject to the terms and conditions of the GNU General Public
@@ -9,6 +11,12 @@
 #ifndef _ASM_MICROBLAZE_CURRENT_H
 #define _ASM_MICROBLAZE_CURRENT_H
/*
* Register used to hold the current task pointer while in the kernel.
* Any `call clobbered' register without a special meaning should be OK,
* but check asm/microblaze/kernel/entry.S to be sure.
*/
#define CURRENT_TASK r31
 # ifndef __ASSEMBLY__
 /*
  * Dedicate r31 to keeping the current task pointer
...
+#include <asm-generic/dma-mapping-broken.h>
-/*
* Copyright (C) 2006 Atmark Techno, Inc.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#ifndef _ASM_MICROBLAZE_DMA_MAPPING_H
#define _ASM_MICROBLAZE_DMA_MAPPING_H
#include <asm/cacheflush.h>
#include <linux/io.h>
#include <linux/bug.h>
struct scatterlist;
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
/* FIXME */
static inline int
dma_supported(struct device *dev, u64 mask)
{
return 1;
}
static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction direction)
{
BUG();
return 0;
}
static inline void
dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
enum dma_data_direction direction)
{
BUG();
}
static inline int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
enum dma_data_direction direction)
{
BUG();
return 0;
}
static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
enum dma_data_direction direction)
{
BUG();
}
static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
enum dma_data_direction direction)
{
BUG();
}
static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
size_t size, enum dma_data_direction direction)
{
BUG();
}
static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
enum dma_data_direction direction)
{
BUG();
}
static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
enum dma_data_direction direction)
{
BUG();
}
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
return 0;
}
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, int flag)
{
return NULL; /* consistent_alloc(flag, size, dma_handle); */
}
static inline void dma_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle)
{
BUG();
}
static inline dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
enum dma_data_direction direction)
{
BUG_ON(direction == DMA_NONE);
return virt_to_bus(ptr);
}
static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
size_t size,
enum dma_data_direction direction)
{
switch (direction) {
case DMA_FROM_DEVICE:
flush_dcache_range((unsigned)dma_addr,
(unsigned)dma_addr + size);
/* Fall through */
case DMA_TO_DEVICE:
break;
default:
BUG();
}
}
#endif /* _ASM_MICROBLAZE_DMA_MAPPING_H */
@@ -9,8 +9,13 @@
 #ifndef _ASM_MICROBLAZE_DMA_H
 #define _ASM_MICROBLAZE_DMA_H

+#ifndef CONFIG_MMU
 /* we don't have dma address limit. define it as zero to be
  * unlimited. */
 #define MAX_DMA_ADDRESS		(0)
+#else
+/* Virtual address corresponding to last available physical memory address. */
+#define MAX_DMA_ADDRESS		(CONFIG_KERNEL_START + memory_size - 1)
+#endif

 #endif /* _ASM_MICROBLAZE_DMA_H */
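As a worked example (the values are hypothetical, not from the patch): with the defconfig's CONFIG_KERNEL_START of 0xc0000000 and 256 MB of RAM detected at boot (memory_size = 0x10000000), the MMU branch above yields MAX_DMA_ADDRESS = 0xc0000000 + 0x10000000 - 1 = 0xcfffffff, i.e. the kernel-virtual address of the last byte of physical memory.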
/*
+ * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2008-2009 PetaLogix
  * Copyright (C) 2006 Atmark Techno, Inc.
  *
  * This file is subject to the terms and conditions of the GNU General Public
@@ -27,4 +29,95 @@
  */
 #define ELF_CLASS	ELFCLASS32
#ifndef __uClinux__
/*
* ELF register definitions..
*/
#include <asm/ptrace.h>
#include <asm/byteorder.h>
#ifndef ELF_GREG_T
#define ELF_GREG_T
typedef unsigned long elf_greg_t;
#endif
#ifndef ELF_NGREG
#define ELF_NGREG (sizeof(struct pt_regs) / sizeof(elf_greg_t))
#endif
#ifndef ELF_GREGSET_T
#define ELF_GREGSET_T
typedef elf_greg_t elf_gregset_t[ELF_NGREG];
#endif
#ifndef ELF_FPREGSET_T
#define ELF_FPREGSET_T
/* TBD */
#define ELF_NFPREG 33 /* includes fsr */
typedef unsigned long elf_fpreg_t;
typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
/* typedef struct user_fpu_struct elf_fpregset_t; */
#endif
/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
* use of this is to invoke "./ld.so someprog" to test out a new version of
* the loader. We need to make sure that it is out of the way of the program
* that it will "exec", and that there is sufficient room for the brk.
*/
#define ELF_ET_DYN_BASE (0x08000000)
#ifdef __LITTLE_ENDIAN__
#define ELF_DATA ELFDATA2LSB
#else
#define ELF_DATA ELFDATA2MSB
#endif
#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096
#define ELF_CORE_COPY_REGS(_dest, _regs) \
memcpy((char *) &_dest, (char *) _regs, \
sizeof(struct pt_regs));
/* This yields a mask that user programs can use to figure out what
* instruction set this CPU supports. This could be done in user space,
* but it's not easy, and we've already done it here.
*/
#define ELF_HWCAP (0)
/* This yields a string that ld.so will use to load implementation
* specific libraries for optimization. This is more specific in
* intent than poking at uname or /proc/cpuinfo.
* For the moment, we have only optimizations for the Intel generations,
* but that could change...
*/
#define ELF_PLATFORM (NULL)
/* Added _f parameter. Is this definition correct: TBD */
#define ELF_PLAT_INIT(_r, _f) \
do { \
_r->r1 = _r->r1 = _r->r2 = _r->r3 = \
_r->r4 = _r->r5 = _r->r6 = _r->r7 = \
_r->r8 = _r->r9 = _r->r10 = _r->r11 = \
_r->r12 = _r->r13 = _r->r14 = _r->r15 = \
_r->r16 = _r->r17 = _r->r18 = _r->r19 = \
_r->r20 = _r->r21 = _r->r22 = _r->r23 = \
_r->r24 = _r->r25 = _r->r26 = _r->r27 = \
_r->r28 = _r->r29 = _r->r30 = _r->r31 = \
0; \
} while (0)
#ifdef __KERNEL__
#define SET_PERSONALITY(ex) set_personality(PER_LINUX_32BIT)
#endif
#endif /* __uClinux__ */
 #endif /* _ASM_MICROBLAZE_ELF_H */
/*
 * Definitions used by low-level trap handlers
 *
- * Copyright (C) 2008 Michal Simek
- * Copyright (C) 2007 - 2008 PetaLogix
+ * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2007-2009 PetaLogix
  * Copyright (C) 2007 John Williams <john.williams@petalogix.com>
  *
  * This file is subject to the terms and conditions of the GNU General
@@ -31,7 +31,40 @@ DECLARE_PER_CPU(unsigned int, R11_SAVE); /* Temp variable for entry */
 DECLARE_PER_CPU(unsigned int, CURRENT_SAVE); /* Saved current pointer */
 # endif /* __ASSEMBLY__ */

+#ifndef CONFIG_MMU
 /* noMMU hasn't any space for args */
 # define STATE_SAVE_ARG_SPACE	(0)
#else /* CONFIG_MMU */
/* If true, system calls save and restore all registers (except result
* registers, of course). If false, then `call clobbered' registers
* will not be preserved, on the theory that system calls are basically
* function calls anyway, and the caller should be able to deal with it.
* This is a security risk, of course, as `internal' values may leak out
* after a system call, but that certainly doesn't matter very much for
* a processor with no MMU protection! For a protected-mode kernel, it
* would be faster to just zero those registers before returning.
*
* I can not rely on the glibc implementation. If you turn it off make
* sure that r11/r12 is saved in user-space. --KAA
*
* These are special variables using by the kernel trap/interrupt code
* to save registers in, at a time when there are no spare registers we
* can use to do so, and we can't depend on the value of the stack
* pointer. This means that they must be within a signed 16-bit
* displacement of 0x00000000.
*/
/* A `state save frame' is a struct pt_regs preceded by some extra space
* suitable for a function call stack frame. */
/* Amount of room on the stack reserved for arguments and to satisfy the
* C calling conventions, in addition to the space used by the struct
* pt_regs that actually holds saved values. */
#define STATE_SAVE_ARG_SPACE (6*4) /* Up to six arguments */
#endif /* CONFIG_MMU */
 #endif /* _ASM_MICROBLAZE_ENTRY_H */
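Putting the entry.h comments together, the MMU kernel's state save frame looks roughly like this (an illustration inferred from the comments above, not a diagram from the patch):

/*
 *   sp -> +--------------------------+  lower addresses
 *         | STATE_SAVE_ARG_SPACE     |  6*4 bytes of argument/ABI room
 *         +--------------------------+
 *         | struct pt_regs           |  register state saved by entry.S
 *         +--------------------------+  higher addresses
 */

so a trap drops the stack pointer by STATE_SAVE_ARG_SPACE + sizeof(struct pt_regs), and C code called from entry.S can treat the frame base as an ordinary call frame.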
/*
 * Preliminary support for HW exception handing for Microblaze
 *
- * Copyright (C) 2008 Michal Simek
- * Copyright (C) 2008 PetaLogix
+ * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2008-2009 PetaLogix
  * Copyright (C) 2005 John Williams <jwilliams@itee.uq.edu.au>
  *
  * This file is subject to the terms and conditions of the GNU General
@@ -64,21 +64,13 @@ asmlinkage void full_exception(struct pt_regs *regs, unsigned int type,
 void die(const char *str, struct pt_regs *fp, long err);
 void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr);

-#if defined(CONFIG_XMON)
-extern void xmon(struct pt_regs *regs);
-extern int xmon_bpt(struct pt_regs *regs);
-extern int xmon_sstep(struct pt_regs *regs);
-extern int xmon_iabr_match(struct pt_regs *regs);
-extern int xmon_dabr_match(struct pt_regs *regs);
-extern void (*xmon_fault_handler)(struct pt_regs *regs);
+#ifdef CONFIG_MMU
+void __bug(const char *file, int line, void *data);
+int bad_trap(int trap_num, struct pt_regs *regs);
+int debug_trap(struct pt_regs *regs);
+#endif /* CONFIG_MMU */

-void (*debugger)(struct pt_regs *regs) = xmon;
-int (*debugger_bpt)(struct pt_regs *regs) = xmon_bpt;
-int (*debugger_sstep)(struct pt_regs *regs) = xmon_sstep;
-int (*debugger_iabr_match)(struct pt_regs *regs) = xmon_iabr_match;
-int (*debugger_dabr_match)(struct pt_regs *regs) = xmon_dabr_match;
-void (*debugger_fault_handler)(struct pt_regs *regs);
-#elif defined(CONFIG_KGDB)
+#if defined(CONFIG_KGDB)
 void (*debugger)(struct pt_regs *regs);
 int (*debugger_bpt)(struct pt_regs *regs);
 int (*debugger_sstep)(struct pt_regs *regs);
...
...@@ -13,7 +13,6 @@ ...@@ -13,7 +13,6 @@
#include <asm/unaligned.h> #include <asm/unaligned.h>
#define flat_stack_align(sp) /* nothing needed */
#define flat_argvp_envp_on_stack() 0 #define flat_argvp_envp_on_stack() 0
#define flat_old_ram_flag(flags) (flags) #define flat_old_ram_flag(flags) (flags)
#define flat_reloc_valid(reloc, size) ((reloc) <= (size)) #define flat_reloc_valid(reloc, size) ((reloc) <= (size))
......
@@ -11,8 +11,8 @@
  * (at your option) any later version.
  */

-#ifndef __ASM_POWERPC_GPIO_H
-#define __ASM_POWERPC_GPIO_H
+#ifndef _ASM_MICROBLAZE_GPIO_H
+#define _ASM_MICROBLAZE_GPIO_H

 #include <linux/errno.h>
 #include <asm-generic/gpio.h>
@@ -53,4 +53,4 @@ static inline int irq_to_gpio(unsigned int irq)
 #endif /* CONFIG_GPIOLIB */

-#endif /* __ASM_POWERPC_GPIO_H */
+#endif /* _ASM_MICROBLAZE_GPIO_H */
/*
+ * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2007-2009 PetaLogix
  * Copyright (C) 2006 Atmark Techno, Inc.
  *
  * This file is subject to the terms and conditions of the GNU General Public
@@ -12,6 +14,9 @@
 #include <asm/byteorder.h>
 #include <asm/page.h>
 #include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/mm.h>	/* Get struct page {...} */

 #define IO_SPACE_LIMIT	(0xFFFFFFFF)
@@ -112,6 +117,30 @@ static inline void writel(unsigned int v, volatile void __iomem *addr)
 #define memcpy_fromio(a, b, c)	memcpy((a), (void *)(b), (c))
 #define memcpy_toio(a, b, c)	memcpy((void *)(a), (b), (c))
#ifdef CONFIG_MMU
#define mm_ptov(addr) ((void *)__phys_to_virt(addr))
#define mm_vtop(addr) ((unsigned long)__virt_to_phys(addr))
#define phys_to_virt(addr) ((void *)__phys_to_virt(addr))
#define virt_to_phys(addr) ((unsigned long)__virt_to_phys(addr))
#define virt_to_bus(addr) ((unsigned long)__virt_to_phys(addr))
#define __page_address(page) \
(PAGE_OFFSET + (((page) - mem_map) << PAGE_SHIFT))
#define page_to_phys(page) virt_to_phys((void *)__page_address(page))
#define page_to_bus(page) (page_to_phys(page))
#define bus_to_virt(addr) (phys_to_virt(addr))
extern void iounmap(void *addr);
/*extern void *__ioremap(phys_addr_t address, unsigned long size,
unsigned long flags);*/
extern void __iomem *ioremap(phys_addr_t address, unsigned long size);
#define ioremap_writethrough(addr, size) ioremap((addr), (size))
#define ioremap_nocache(addr, size) ioremap((addr), (size))
#define ioremap_fullcache(addr, size) ioremap((addr), (size))
#else /* CONFIG_MMU */
 /**
  * virt_to_phys - map virtual addresses to physical
  * @address: address to remap
@@ -160,6 +189,8 @@ static inline void __iomem *__ioremap(phys_addr_t address, unsigned long size,
 #define iounmap(addr)		((void)0)
 #define ioremap_nocache(physaddr, size)	ioremap(physaddr, size)

+#endif /* CONFIG_MMU */

 /*
  * Convert a physical pointer to a virtual kernel pointer for /dev/mem
  * access
...
/*
+ * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2008-2009 PetaLogix
  * Copyright (C) 2006 Atmark Techno, Inc.
  *
  * This file is subject to the terms and conditions of the GNU General Public
@@ -9,11 +11,109 @@
 #ifndef _ASM_MICROBLAZE_MMU_H
 #define _ASM_MICROBLAZE_MMU_H

-#ifndef __ASSEMBLY__
+# ifndef CONFIG_MMU
+# ifndef __ASSEMBLY__
 typedef struct {
 	struct vm_list_struct	*vmlist;
 	unsigned long		end_brk;
 } mm_context_t;
-#endif /* __ASSEMBLY__ */
+# endif /* __ASSEMBLY__ */
# else /* CONFIG_MMU */
# ifdef __KERNEL__
# ifndef __ASSEMBLY__
/* Default "unsigned long" context */
typedef unsigned long mm_context_t;
/* Hardware Page Table Entry */
typedef struct _PTE {
unsigned long v:1; /* Entry is valid */
unsigned long vsid:24; /* Virtual segment identifier */
unsigned long h:1; /* Hash algorithm indicator */
unsigned long api:6; /* Abbreviated page index */
unsigned long rpn:20; /* Real (physical) page number */
unsigned long :3; /* Unused */
unsigned long r:1; /* Referenced */
unsigned long c:1; /* Changed */
unsigned long w:1; /* Write-thru cache mode */
unsigned long i:1; /* Cache inhibited */
unsigned long m:1; /* Memory coherence */
unsigned long g:1; /* Guarded */
unsigned long :1; /* Unused */
unsigned long pp:2; /* Page protection */
} PTE;
/* Values for PP (assumes Ks=0, Kp=1) */
# define PP_RWXX 0 /* Supervisor read/write, User none */
# define PP_RWRX 1 /* Supervisor read/write, User read */
# define PP_RWRW 2 /* Supervisor read/write, User read/write */
# define PP_RXRX 3 /* Supervisor read, User read */
/* Segment Register */
typedef struct _SEGREG {
unsigned long t:1; /* Normal or I/O type */
unsigned long ks:1; /* Supervisor 'key' (normally 0) */
unsigned long kp:1; /* User 'key' (normally 1) */
unsigned long n:1; /* No-execute */
unsigned long :4; /* Unused */
unsigned long vsid:24; /* Virtual Segment Identifier */
} SEGREG;
extern void _tlbie(unsigned long va); /* invalidate a TLB entry */
extern void _tlbia(void); /* invalidate all TLB entries */
# endif /* __ASSEMBLY__ */
/*
* The MicroBlaze processor has a TLB architecture identical to PPC-40x. The
* instruction and data sides share a unified, 64-entry, semi-associative
* TLB which is maintained totally under software control. In addition, the
* instruction side has a hardware-managed, 2,4, or 8-entry, fully-associative
* TLB which serves as a first level to the shared TLB. These two TLBs are
* known as the UTLB and ITLB, respectively.
*/
# define MICROBLAZE_TLB_SIZE 64
/*
* TLB entries are defined by a "high" tag portion and a "low" data
* portion. The data portion is 32-bits.
*
* TLB entries are managed entirely under software control by reading,
* writing, and searching using the MTS and MFS instructions.
*/
# define TLB_LO 1
# define TLB_HI 0
# define TLB_DATA TLB_LO
# define TLB_TAG TLB_HI
/* Tag portion */
# define TLB_EPN_MASK 0xFFFFFC00 /* Effective Page Number */
# define TLB_PAGESZ_MASK 0x00000380
# define TLB_PAGESZ(x) (((x) & 0x7) << 7)
# define PAGESZ_1K 0
# define PAGESZ_4K 1
# define PAGESZ_16K 2
# define PAGESZ_64K 3
# define PAGESZ_256K 4
# define PAGESZ_1M 5
# define PAGESZ_4M 6
# define PAGESZ_16M 7
# define TLB_VALID 0x00000040 /* Entry is valid */
/* Data portion */
# define TLB_RPN_MASK 0xFFFFFC00 /* Real Page Number */
# define TLB_PERM_MASK 0x00000300
# define TLB_EX 0x00000200 /* Instruction execution allowed */
# define TLB_WR 0x00000100 /* Writes permitted */
# define TLB_ZSEL_MASK 0x000000F0
# define TLB_ZSEL(x) (((x) & 0xF) << 4)
# define TLB_ATTR_MASK 0x0000000F
# define TLB_W 0x00000008 /* Caching is write-through */
# define TLB_I 0x00000004 /* Caching is inhibited */
# define TLB_M 0x00000002 /* Memory is coherent */
# define TLB_G 0x00000001 /* Memory is guarded from prefetch */
# endif /* __KERNEL__ */
# endif /* CONFIG_MMU */
 #endif /* _ASM_MICROBLAZE_MMU_H */
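To make the tag/data split above concrete, here is a hypothetical, user-space-compilable sketch (macro values copied from the header; the chosen mapping is invented) that composes a UTLB entry mapping virtual 0xc0000000 to physical 0x90000000 as a valid, writable, executable 16 MB page:

#include <stdio.h>

#define TLB_EPN_MASK	0xFFFFFC00	/* Effective Page Number */
#define TLB_PAGESZ(x)	(((x) & 0x7) << 7)
#define PAGESZ_16M	7
#define TLB_VALID	0x00000040
#define TLB_RPN_MASK	0xFFFFFC00	/* Real Page Number */
#define TLB_EX		0x00000200	/* execution allowed */
#define TLB_WR		0x00000100	/* writes permitted */

int main(void)
{
	unsigned long tag  = (0xc0000000UL & TLB_EPN_MASK)
			     | TLB_PAGESZ(PAGESZ_16M) | TLB_VALID;
	unsigned long data = (0x90000000UL & TLB_RPN_MASK)
			     | TLB_EX | TLB_WR;
	printf("tag=%08lx data=%08lx\n", tag, data); /* c00003c0 90000300 */
	return 0;
}

On real hardware the two words would be written into the software-managed TLB with the MTS instruction mentioned in the comment above; the sketch only demonstrates the bit layout.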
-/*
- * Copyright (C) 2006 Atmark Techno, Inc.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#ifndef _ASM_MICROBLAZE_MMU_CONTEXT_H
-#define _ASM_MICROBLAZE_MMU_CONTEXT_H
-
-# define init_new_context(tsk, mm)		({ 0; })
-# define enter_lazy_tlb(mm, tsk)		do {} while (0)
-# define change_mm_context(old, ctx, _pml4)	do {} while (0)
-# define destroy_context(mm)			do {} while (0)
-# define deactivate_mm(tsk, mm)			do {} while (0)
-# define switch_mm(prev, next, tsk)		do {} while (0)
-# define activate_mm(prev, next)		do {} while (0)
-
-#endif /* _ASM_MICROBLAZE_MMU_CONTEXT_H */
+#ifdef CONFIG_MMU
+# include "mmu_context_mm.h"
+#else
+# include "mmu_context_no.h"
+#endif
/*
* Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
* Copyright (C) 2008-2009 PetaLogix
* Copyright (C) 2006 Atmark Techno, Inc.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#ifndef _ASM_MICROBLAZE_MMU_CONTEXT_H
#define _ASM_MICROBLAZE_MMU_CONTEXT_H
#include <asm/atomic.h>
#include <asm/bitops.h>
#include <asm/mmu.h>
#include <asm-generic/mm_hooks.h>
# ifdef __KERNEL__
/*
* This function defines the mapping from contexts to VSIDs (virtual
* segment IDs). We use a skew on both the context and the high 4 bits
* of the 32-bit virtual address (the "effective segment ID") in order
* to spread out the entries in the MMU hash table.
*/
# define CTX_TO_VSID(ctx, va) (((ctx) * (897 * 16) + ((va) >> 28) * 0x111) \
& 0xffffff)
/*
MicroBlaze has 256 contexts, so we can just rotate through these
as a way of "switching" contexts. If the TID of the TLB is zero,
the PID/TID comparison is disabled, so we can use a TID of zero
to represent all kernel pages as shared among all contexts.
*/
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}
# define NO_CONTEXT 256
# define LAST_CONTEXT 255
# define FIRST_CONTEXT 1
/*
* Set the current MMU context.
* This is done byloading up the segment registers for the user part of the
* address space.
*
* Since the PGD is immediately available, it is much faster to simply
* pass this along as a second parameter, which is required for 8xx and
* can be used for debugging on all processors (if you happen to have
* an Abatron).
*/
extern void set_context(mm_context_t context, pgd_t *pgd);
/*
* Bitmap of contexts in use.
* The size of this bitmap is LAST_CONTEXT + 1 bits.
*/
extern unsigned long context_map[];
/*
* This caches the next context number that we expect to be free.
* Its use is an optimization only, we can't rely on this context
* number to be free, but it usually will be.
*/
extern mm_context_t next_mmu_context;
/*
* Since we don't have sufficient contexts to give one to every task
* that could be in the system, we need to be able to steal contexts.
* These variables support that.
*/
extern atomic_t nr_free_contexts;
extern struct mm_struct *context_mm[LAST_CONTEXT+1];
extern void steal_context(void);
/*
* Get a new mmu context for the address space described by `mm'.
*/
static inline void get_mmu_context(struct mm_struct *mm)
{
mm_context_t ctx;
if (mm->context != NO_CONTEXT)
return;
while (atomic_dec_if_positive(&nr_free_contexts) < 0)
steal_context();
ctx = next_mmu_context;
while (test_and_set_bit(ctx, context_map)) {
ctx = find_next_zero_bit(context_map, LAST_CONTEXT+1, ctx);
if (ctx > LAST_CONTEXT)
ctx = 0;
}
next_mmu_context = (ctx + 1) & LAST_CONTEXT;
mm->context = ctx;
context_mm[ctx] = mm;
}
/*
* Set up the context for a new address space.
*/
# define init_new_context(tsk, mm) (((mm)->context = NO_CONTEXT), 0)
/*
* We're finished using the context for an address space.
*/
static inline void destroy_context(struct mm_struct *mm)
{
if (mm->context != NO_CONTEXT) {
clear_bit(mm->context, context_map);
mm->context = NO_CONTEXT;
atomic_inc(&nr_free_contexts);
}
}
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk)
{
tsk->thread.pgdir = next->pgd;
get_mmu_context(next);
set_context(next->context, next->pgd);
}
/*
* After we have set current->mm to a new value, this activates
* the context for the new mm so we see the new mappings.
*/
static inline void activate_mm(struct mm_struct *active_mm,
struct mm_struct *mm)
{
current->thread.pgdir = mm->pgd;
get_mmu_context(mm);
set_context(mm->context, mm->pgd);
}
extern void mmu_context_init(void);
# endif /* __KERNEL__ */
#endif /* _ASM_MICROBLAZE_MMU_CONTEXT_H */
/*
* Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
* Copyright (C) 2008-2009 PetaLogix
* Copyright (C) 2006 Atmark Techno, Inc.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#ifndef _ASM_MICROBLAZE_MMU_CONTEXT_H
#define _ASM_MICROBLAZE_MMU_CONTEXT_H
# define init_new_context(tsk, mm) ({ 0; })
# define enter_lazy_tlb(mm, tsk) do {} while (0)
# define change_mm_context(old, ctx, _pml4) do {} while (0)
# define destroy_context(mm) do {} while (0)
# define deactivate_mm(tsk, mm) do {} while (0)
# define switch_mm(prev, next, tsk) do {} while (0)
# define activate_mm(prev, next) do {} while (0)
#endif /* _ASM_MICROBLAZE_MMU_CONTEXT_H */
/*
- * Copyright (C) 2008 Michal Simek
- * Copyright (C) 2008 PetaLogix
+ * VM ops
+ *
+ * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2008-2009 PetaLogix
  * Copyright (C) 2006 Atmark Techno, Inc.
  * Changes for MMU support:
  * Copyright (C) 2007 Xilinx, Inc. All rights reserved.
@@ -15,14 +17,15 @@
 #include <linux/pfn.h>
 #include <asm/setup.h>
+#include <linux/const.h>
+
+#ifdef __KERNEL__

 /* PAGE_SHIFT determines the page size */
 #define PAGE_SHIFT	(12)
-#define PAGE_SIZE	(1UL << PAGE_SHIFT)
+#define PAGE_SIZE	(_AC(1, UL) << PAGE_SHIFT)
 #define PAGE_MASK	(~(PAGE_SIZE-1))

-#ifdef __KERNEL__
-
 #ifndef __ASSEMBLY__

 #define PAGE_UP(addr)	(((addr)+((PAGE_SIZE)-1))&(~((PAGE_SIZE)-1)))
@@ -35,6 +38,7 @@
 /* align addr on a size boundary - adjust address up if needed */
 #define _ALIGN(addr, size)	_ALIGN_UP(addr, size)
#ifndef CONFIG_MMU
/* /*
* PAGE_OFFSET -- the first address of the first page of memory. When not * PAGE_OFFSET -- the first address of the first page of memory. When not
* using MMU this corresponds to the first free page in physical memory (aligned * using MMU this corresponds to the first free page in physical memory (aligned
...@@ -43,15 +47,44 @@ ...@@ -43,15 +47,44 @@
extern unsigned int __page_offset; extern unsigned int __page_offset;
#define PAGE_OFFSET __page_offset #define PAGE_OFFSET __page_offset
#define copy_page(to, from) memcpy((to), (from), PAGE_SIZE) #else /* CONFIG_MMU */
#define get_user_page(vaddr) __get_free_page(GFP_KERNEL)
#define free_user_page(page, addr) free_page(addr)
#define clear_page(pgaddr) memset((pgaddr), 0, PAGE_SIZE) /*
* PAGE_OFFSET -- the first address of the first page of memory. With MMU
* it is set to the kernel start address (aligned on a page boundary).
*
* CONFIG_KERNEL_START is defined in arch/microblaze/config.in and used
* in arch/microblaze/Makefile.
*/
#define PAGE_OFFSET CONFIG_KERNEL_START
/*
* MAP_NR -- given an address, calculate the index of the page struct which
* points to the address's page.
*/
#define MAP_NR(addr) (((unsigned long)(addr) - PAGE_OFFSET) >> PAGE_SHIFT)
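MAP_NR is a subtract-and-shift. A worked example, assuming PAGE_OFFSET (CONFIG_KERNEL_START) is 0xC0000000; the value is board-configurable, so the base is purely illustrative:

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_OFFSET	0xC0000000UL	/* assumed CONFIG_KERNEL_START */
#define MAP_NR(addr)	(((unsigned long)(addr) - PAGE_OFFSET) >> PAGE_SHIFT)

int main(void)
{
	/* third page of the kernel mapping -> mem_map index 3 */
	printf("%lu\n", MAP_NR(0xC0003000UL));
	return 0;
}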
/*
 * The basic type of a PTE - 32 bit physical addressing.
 */
typedef unsigned long pte_basic_t;
#define PTE_SHIFT	(PAGE_SHIFT - 2)	/* 1024 ptes per page */
#define PTE_FMT		"%.8lx"

#endif /* CONFIG_MMU */

# ifndef CONFIG_MMU
# define copy_page(to, from)			memcpy((to), (from), PAGE_SIZE)
# define get_user_page(vaddr)			__get_free_page(GFP_KERNEL)
# define free_user_page(page, addr)		free_page(addr)
# else /* CONFIG_MMU */
extern void copy_page(void *to, void *from);
# endif /* CONFIG_MMU */

# define clear_page(pgaddr)			memset((pgaddr), 0, PAGE_SIZE)
# define clear_user_page(pgaddr, vaddr, page)	memset((pgaddr), 0, PAGE_SIZE)
# define copy_user_page(vto, vfrom, vaddr, topg) \
			memcpy((vto), (vfrom), PAGE_SIZE)

/*
@@ -60,21 +93,32 @@ extern unsigned int __page_offset;
typedef struct page *pgtable_t;
typedef struct { unsigned long pte; }		pte_t;
typedef struct { unsigned long pgprot; }	pgprot_t;
/* FIXME this can depend on linux kernel version */
# ifdef CONFIG_MMU
typedef struct { unsigned long pmd; }		pmd_t;
typedef struct { unsigned long pgd; }		pgd_t;
# else /* CONFIG_MMU */
typedef struct { unsigned long ste[64]; }	pmd_t;
typedef struct { pmd_t pue[1]; }		pud_t;
typedef struct { pud_t pge[1]; }		pgd_t;
# endif /* CONFIG_MMU */

# define pte_val(x)	((x).pte)
# define pgprot_val(x)	((x).pgprot)

# ifdef CONFIG_MMU
# define pmd_val(x)	((x).pmd)
# define pgd_val(x)	((x).pgd)
# else /* CONFIG_MMU */
# define pmd_val(x)	((x).ste[0])
# define pud_val(x)	((x).pue[0])
# define pgd_val(x)	((x).pge[0])
# endif /* CONFIG_MMU */

# define __pte(x)	((pte_t) { (x) })
# define __pmd(x)	((pmd_t) { (x) })
# define __pgd(x)	((pgd_t) { (x) })
# define __pgprot(x)	((pgprot_t) { (x) })

/**
 * Conversions for virtual address, physical address, pfn, and struct
@@ -94,44 +138,80 @@ extern unsigned long max_low_pfn;
extern unsigned long min_low_pfn;
extern unsigned long max_pfn;

extern unsigned long memory_start;
extern unsigned long memory_end;
extern unsigned long memory_size;

extern int page_is_ram(unsigned long pfn);

# define phys_to_pfn(phys)	(PFN_DOWN(phys))
# define pfn_to_phys(pfn)	(PFN_PHYS(pfn))

# define virt_to_pfn(vaddr)	(phys_to_pfn((__pa(vaddr))))
# define pfn_to_virt(pfn)	__va(pfn_to_phys((pfn)))

# ifdef CONFIG_MMU
# define virt_to_page(kaddr)	(mem_map + MAP_NR(kaddr))
# else /* CONFIG_MMU */
# define virt_to_page(vaddr)	(pfn_to_page(virt_to_pfn(vaddr)))
# define page_to_virt(page)	(pfn_to_virt(page_to_pfn(page)))
# define page_to_phys(page)	(pfn_to_phys(page_to_pfn(page)))
# define page_to_bus(page)	(page_to_phys(page))
# define phys_to_page(paddr)	(pfn_to_page(phys_to_pfn(paddr)))
# endif /* CONFIG_MMU */

# ifndef CONFIG_MMU
# define pfn_valid(pfn)		((pfn) >= min_low_pfn && (pfn) <= max_mapnr)
# define ARCH_PFN_OFFSET	(PAGE_OFFSET >> PAGE_SHIFT)
# else /* CONFIG_MMU */
# define ARCH_PFN_OFFSET	(memory_start >> PAGE_SHIFT)
# define pfn_valid(pfn)		((pfn) < (max_mapnr + ARCH_PFN_OFFSET))
# define VALID_PAGE(page)	((page - mem_map) < max_mapnr)
# endif /* CONFIG_MMU */

# endif /* __ASSEMBLY__ */

#define virt_addr_valid(vaddr)	(pfn_valid(virt_to_pfn(vaddr)))

# ifndef CONFIG_MMU
# define __pa(vaddr)	((unsigned long) (vaddr))
# define __va(paddr)	((void *) (paddr))
# else /* CONFIG_MMU */
# define __pa(x)	__virt_to_phys((unsigned long)(x))
# define __va(x)	((void *)__phys_to_virt((unsigned long)(x)))
# endif /* CONFIG_MMU */

/* Convert between virtual and physical address for MMU. */
/* Handle MicroBlaze processor with virtual memory. */
#ifndef CONFIG_MMU
#define __virt_to_phys(addr)	addr
#define __phys_to_virt(addr)	addr
#define tophys(rd, rs)	addik rd, rs, 0
#define tovirt(rd, rs)	addik rd, rs, 0
#else
#define __virt_to_phys(addr) \
	((addr) + CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START)
#define __phys_to_virt(addr) \
	((addr) + CONFIG_KERNEL_START - CONFIG_KERNEL_BASE_ADDR)
#define tophys(rd, rs) \
	addik rd, rs, (CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START)
#define tovirt(rd, rs) \
	addik rd, rs, (CONFIG_KERNEL_START - CONFIG_KERNEL_BASE_ADDR)
#endif /* CONFIG_MMU */

#define TOPHYS(addr)	__virt_to_phys(addr)
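With MMU the conversion is a fixed linear offset between the kernel's virtual base and the physical RAM base. A worked example with illustrative, board-configurable values (CONFIG_KERNEL_START = 0xC0000000 and CONFIG_KERNEL_BASE_ADDR = 0x90000000 are assumptions here):

#include <stdio.h>

#define KERNEL_START		0xC0000000UL	/* assumed virtual base */
#define KERNEL_BASE_ADDR	0x90000000UL	/* assumed physical RAM base */

int main(void)
{
	unsigned long vaddr = 0xC0100000UL;
	unsigned long paddr = vaddr + KERNEL_BASE_ADDR - KERNEL_START;

	printf("virt 0x%08lx -> phys 0x%08lx\n", vaddr, paddr); /* 0x90100000 */
	return 0;
}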
#ifdef CONFIG_MMU
#ifdef CONFIG_CONTIGUOUS_PAGE_ALLOC
#define WANT_PAGE_VIRTUAL 1 /* page alloc 2 relies on this */
#endif
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
#endif /* CONFIG_MMU */
#endif /* __KERNEL__ */

#include <asm-generic/memory_model.h>
...
/*
 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2008-2009 PetaLogix
 * Copyright (C) 2006 Atmark Techno, Inc.
 *
 * This file is subject to the terms and conditions of the GNU General Public

@@ -9,6 +11,195 @@
#ifndef _ASM_MICROBLAZE_PGALLOC_H
#define _ASM_MICROBLAZE_PGALLOC_H
#ifdef CONFIG_MMU
#include <linux/kernel.h> /* For min/max macros */
#include <linux/highmem.h>
#include <asm/setup.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/cache.h>
#define PGDIR_ORDER 0
/*
* This is handled very differently on MicroBlaze since our page tables
* are all 0's and I want to be able to use these zero'd pages elsewhere
* as well - it gives us quite a speedup.
* -- Cort
*/
extern struct pgtable_cache_struct {
unsigned long *pgd_cache;
unsigned long *pte_cache;
unsigned long pgtable_cache_sz;
} quicklists;
#define pgd_quicklist (quicklists.pgd_cache)
#define pmd_quicklist ((unsigned long *)0)
#define pte_quicklist (quicklists.pte_cache)
#define pgtable_cache_size (quicklists.pgtable_cache_sz)
extern unsigned long *zero_cache; /* head linked list of pre-zero'd pages */
extern atomic_t zero_sz; /* # currently pre-zero'd pages */
extern atomic_t zeropage_hits; /* # zero'd pages request that we've done */
extern atomic_t zeropage_calls; /* # zero'd pages request that've been made */
extern atomic_t zerototal; /* # pages zero'd over time */
#define zero_quicklist (zero_cache)
#define zero_cache_sz (zero_sz)
#define zero_cache_calls (zeropage_calls)
#define zero_cache_hits (zeropage_hits)
#define zero_cache_total (zerototal)
/*
* return a pre-zero'd page from the list,
* return NULL if none available -- Cort
*/
extern unsigned long get_zero_page_fast(void);
extern void __bad_pte(pmd_t *pmd);
extern inline pgd_t *get_pgd_slow(void)
{
pgd_t *ret;
ret = (pgd_t *)__get_free_pages(GFP_KERNEL, PGDIR_ORDER);
if (ret != NULL)
clear_page(ret);
return ret;
}
extern inline pgd_t *get_pgd_fast(void)
{
unsigned long *ret;
ret = pgd_quicklist;
if (ret != NULL) {
pgd_quicklist = (unsigned long *)(*ret);
ret[0] = 0;
pgtable_cache_size--;
} else
ret = (unsigned long *)get_pgd_slow();
return (pgd_t *)ret;
}
extern inline void free_pgd_fast(pgd_t *pgd)
{
*(unsigned long **)pgd = pgd_quicklist;
pgd_quicklist = (unsigned long *) pgd;
pgtable_cache_size++;
}
extern inline void free_pgd_slow(pgd_t *pgd)
{
free_page((unsigned long)pgd);
}
#define pgd_free(mm, pgd) free_pgd_fast(pgd)
#define pgd_alloc(mm) get_pgd_fast()
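get_pgd_fast() and free_pgd_fast() keep a LIFO free list threaded through the first word of each cached page, so the cache costs no extra memory. A userspace sketch of just that trick (malloc stands in for the kernel page allocator):

#include <stdio.h>
#include <stdlib.h>

static unsigned long *quicklist;	/* head of the free list */

static void push_page(void *page)
{
	/* the page's first word becomes the "next" pointer */
	*(unsigned long **)page = quicklist;
	quicklist = page;
}

static void *pop_page(void)
{
	unsigned long *ret = quicklist;

	if (ret) {
		quicklist = (unsigned long *)*ret;	/* follow next pointer */
		ret[0] = 0;				/* re-zero the word used */
	}
	return ret;
}

int main(void)
{
	void *a = malloc(4096), *b = malloc(4096);

	push_page(a);
	push_page(b);
	printf("%p\n", pop_page());	/* b -- last in, first out */
	printf("%p\n", pop_page());	/* a */
	free(a);
	free(b);
	return 0;
}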
#define pmd_pgtable(pmd) pmd_page(pmd)
/*
* We don't have any real pmd's, and this code never triggers because
* the pgd will always be present..
*/
#define pmd_alloc_one_fast(mm, address) ({ BUG(); ((pmd_t *)1); })
#define pmd_alloc_one(mm, address) ({ BUG(); ((pmd_t *)2); })
/* FIXME two definitions - look below */
#define pmd_free(mm, x) do { } while (0)
#define pgd_populate(mm, pmd, pte) BUG()
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
unsigned long address)
{
pte_t *pte;
extern int mem_init_done;
extern void *early_get_page(void);
if (mem_init_done) {
pte = (pte_t *)__get_free_page(GFP_KERNEL |
__GFP_REPEAT | __GFP_ZERO);
} else {
pte = (pte_t *)early_get_page();
if (pte)
clear_page(pte);
}
return pte;
}
static inline struct page *pte_alloc_one(struct mm_struct *mm,
unsigned long address)
{
struct page *ptepage;
#ifdef CONFIG_HIGHPTE
int flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_REPEAT;
#else
int flags = GFP_KERNEL | __GFP_REPEAT;
#endif
ptepage = alloc_pages(flags, 0);
if (ptepage)
clear_highpage(ptepage);
return ptepage;
}
static inline pte_t *pte_alloc_one_fast(struct mm_struct *mm,
unsigned long address)
{
unsigned long *ret;
ret = pte_quicklist;
if (ret != NULL) {
pte_quicklist = (unsigned long *)(*ret);
ret[0] = 0;
pgtable_cache_size--;
}
return (pte_t *)ret;
}
extern inline void pte_free_fast(pte_t *pte)
{
*(unsigned long **)pte = pte_quicklist;
pte_quicklist = (unsigned long *) pte;
pgtable_cache_size++;
}
extern inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
free_page((unsigned long)pte);
}
extern inline void pte_free_slow(struct page *ptepage)
{
__free_page(ptepage);
}
extern inline void pte_free(struct mm_struct *mm, struct page *ptepage)
{
__free_page(ptepage);
}
#define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, (pte))
#define pmd_populate(mm, pmd, pte) (pmd_val(*(pmd)) = page_address(pte))
#define pmd_populate_kernel(mm, pmd, pte) \
(pmd_val(*(pmd)) = (unsigned long) (pte))
/*
* We don't have any real pmd's, and this code never triggers because
* the pgd will always be present..
*/
#define pmd_alloc_one(mm, address) ({ BUG(); ((pmd_t *)2); })
/*#define pmd_free(mm, x) do { } while (0)*/
#define __pmd_free_tlb(tlb, x) do { } while (0)
#define pgd_populate(mm, pmd, pte) BUG()
extern int do_check_pgt_cache(int, int);
#endif /* CONFIG_MMU */
#define check_pgt_cache()	do {} while (0)

#endif /* _ASM_MICROBLAZE_PGALLOC_H */
/*
 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2008-2009 PetaLogix
 * Copyright (C) 2006 Atmark Techno, Inc.
 *
 * This file is subject to the terms and conditions of the GNU General Public

@@ -14,6 +16,8 @@
#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
		remap_pfn_range(vma, vaddr, pfn, size, prot)

#ifndef CONFIG_MMU

#define pgd_present(pgd)	(1) /* pages are always present on non MMU */
#define pgd_none(pgd)		(0)
#define pgd_bad(pgd)		(0)

@@ -27,6 +31,8 @@
#define PAGE_READONLY	__pgprot(0) /* these mean nothing to non MMU */
#define PAGE_KERNEL	__pgprot(0) /* these mean nothing to non MMU */

#define pgprot_noncached(x)	(x)

#define __swp_type(x)		(0)
#define __swp_offset(x)		(0)
#define __swp_entry(typ, off)	((swp_entry_t) { ((typ) | ((off) << 7)) })

@@ -45,6 +51,538 @@ static inline int pte_file(pte_t pte) { return 0; }
#define arch_enter_lazy_cpu_mode()	do {} while (0)
#else /* CONFIG_MMU */
#include <asm-generic/4level-fixup.h>
#ifdef __KERNEL__
#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/threads.h>
#include <asm/processor.h> /* For TASK_SIZE */
#include <asm/mmu.h>
#include <asm/page.h>
#define FIRST_USER_ADDRESS 0
extern unsigned long va_to_phys(unsigned long address);
extern pte_t *va_to_pte(unsigned long address);
extern unsigned long ioremap_bot, ioremap_base;
/*
* The following only work if pte_present() is true.
* Undefined behaviour if not..
*/
static inline int pte_special(pte_t pte) { return 0; }
static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
/* Start and end of the vmalloc area. */
/* Make sure to map the vmalloc area above the pinned kernel memory area
   of 32MB. */
#define VMALLOC_START (CONFIG_KERNEL_START + \
max(32 * 1024 * 1024UL, memory_size))
#define VMALLOC_END ioremap_bot
#define VMALLOC_VMADDR(x) ((unsigned long)(x))
#endif /* __ASSEMBLY__ */
/*
* The MicroBlaze MMU is identical to the PPC-40x MMU, and uses a hash
* table containing PTEs, together with a set of 16 segment registers, to
* define the virtual to physical address mapping.
*
* We use the hash table as an extended TLB, i.e. a cache of currently
* active mappings. We maintain a two-level page table tree, much
* like that used by the i386, for the sake of the Linux memory
* management code. Low-level assembler code in hashtable.S
* (procedure hash_page) is responsible for extracting ptes from the
* tree and putting them into the hash table when necessary, and
* updating the accessed and modified bits in the page table tree.
*/
/*
* The MicroBlaze processor has a TLB architecture identical to PPC-40x. The
* instruction and data sides share a unified, 64-entry, semi-associative
* TLB which is maintained totally under software control. In addition, the
* instruction side has a hardware-managed, 2-, 4-, or 8-entry, fully-associative
* TLB which serves as a first level to the shared TLB. These two TLBs are
* known as the UTLB and ITLB, respectively (see "mmu.h" for definitions).
*/
/*
* The normal case is that PTEs are 32-bits and we have a 1-page
* 1024-entry pgdir pointing to 1-page 1024-entry PTE pages. -- paulus
*
*/
/* PMD_SHIFT determines the size of the area mapped by the PTE pages */
#define PMD_SHIFT (PAGE_SHIFT + PTE_SHIFT)
#define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1))
/* PGDIR_SHIFT determines what a top-level page table entry can map */
#define PGDIR_SHIFT PMD_SHIFT
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))
/*
* entries per page directory level: our page-table tree is two-level, so
* we don't really have any PMD directory.
*/
#define PTRS_PER_PTE (1 << PTE_SHIFT)
#define PTRS_PER_PMD 1
#define PTRS_PER_PGD (1 << (32 - PGDIR_SHIFT))
#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
#define FIRST_USER_PGD_NR 0
#define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
#define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)
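Plugging in the values from page.h (PAGE_SHIFT = 12, so PTE_SHIFT = 10) gives a concrete picture: 1024 pgd entries, each mapping 4 MB through a 1024-entry PTE page. A quick standalone check:

#include <stdio.h>

int main(void)
{
	unsigned int page_shift = 12, pte_shift = page_shift - 2;
	unsigned int pmd_shift = page_shift + pte_shift;	/* 22 */
	unsigned long pgdir_size = 1UL << pmd_shift;		/* 4 MB */
	unsigned int ptrs_per_pte = 1 << pte_shift;		/* 1024 */
	unsigned int ptrs_per_pgd = 1 << (32 - pmd_shift);	/* 1024 */

	printf("pgd entry maps %lu MB, %u ptes/page, %u pgd entries\n",
	       pgdir_size >> 20, ptrs_per_pte, ptrs_per_pgd);
	return 0;
}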
#define pte_ERROR(e) \
printk(KERN_ERR "%s:%d: bad pte "PTE_FMT".\n", \
__FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
printk(KERN_ERR "%s:%d: bad pmd %08lx.\n", \
__FILE__, __LINE__, pmd_val(e))
#define pgd_ERROR(e) \
printk(KERN_ERR "%s:%d: bad pgd %08lx.\n", \
__FILE__, __LINE__, pgd_val(e))
/*
* Bits in a linux-style PTE. These match the bits in the
* (hardware-defined) PTE as closely as possible.
*/
/* There are several potential gotchas here. The hardware TLBLO
* field looks like this:
*
* 0 1 2 3 4 ... 18 19 20 21 22 23 24 25 26 27 28 29 30 31
* RPN..................... 0 0 EX WR ZSEL....... W I M G
*
* Where possible we make the Linux PTE bits match up with this
*
* - bits 20 and 21 must be cleared, because we use 4k pages (4xx can
* support down to 1k pages), this is done in the TLBMiss exception
* handler.
* - We use only zones 0 (for kernel pages) and 1 (for user pages)
* of the 16 available. Bit 24-26 of the TLB are cleared in the TLB
* miss handler. Bit 27 is PAGE_USER, thus selecting the correct
* zone.
* - PRESENT *must* be in the bottom two bits because swap cache
* entries use the top 30 bits. Because 4xx doesn't support SMP
* anyway, M is irrelevant so we borrow it for PAGE_PRESENT. Bit 30
* is cleared in the TLB miss handler before the TLB entry is loaded.
* - All other bits of the PTE are loaded into TLBLO without
* modification, leaving us only the bits 20, 21, 24, 25, 26, 30 for
* software PTE bits. We actually use bits 21, 24, 25, and
* 30 respectively for the software bits: ACCESSED, DIRTY, RW, and
* PRESENT.
*/
/* Definitions for MicroBlaze. */
#define _PAGE_GUARDED 0x001 /* G: page is guarded from prefetch */
#define _PAGE_PRESENT 0x002 /* software: PTE contains a translation */
#define _PAGE_NO_CACHE 0x004 /* I: caching is inhibited */
#define _PAGE_WRITETHRU 0x008 /* W: caching is write-through */
#define _PAGE_USER 0x010 /* matches one of the zone permission bits */
#define _PAGE_RW 0x040 /* software: Writes permitted */
#define _PAGE_DIRTY 0x080 /* software: dirty page */
#define _PAGE_HWWRITE 0x100 /* hardware: Dirty & RW, set in exception */
#define _PAGE_HWEXEC 0x200 /* hardware: EX permission */
#define _PAGE_ACCESSED 0x400 /* software: R: page referenced */
#define _PMD_PRESENT PAGE_MASK
/*
* Some bits are unused...
*/
#ifndef _PAGE_HASHPTE
#define _PAGE_HASHPTE 0
#endif
#ifndef _PTE_NONE_MASK
#define _PTE_NONE_MASK 0
#endif
#ifndef _PAGE_SHARED
#define _PAGE_SHARED 0
#endif
#ifndef _PAGE_HWWRITE
#define _PAGE_HWWRITE 0
#endif
#ifndef _PAGE_HWEXEC
#define _PAGE_HWEXEC 0
#endif
#ifndef _PAGE_EXEC
#define _PAGE_EXEC 0
#endif
#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
/*
* Note: the _PAGE_COHERENT bit automatically gets set in the hardware
* PTE if CONFIG_SMP is defined (hash_page does this); there is no need
* to have it in the Linux PTE, and in fact the bit could be reused for
* another purpose. -- paulus.
*/
#define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED)
#define _PAGE_WRENABLE (_PAGE_RW | _PAGE_DIRTY | _PAGE_HWWRITE)
#define _PAGE_KERNEL \
(_PAGE_BASE | _PAGE_WRENABLE | _PAGE_SHARED | _PAGE_HWEXEC)
#define _PAGE_IO (_PAGE_KERNEL | _PAGE_NO_CACHE | _PAGE_GUARDED)
#define PAGE_NONE __pgprot(_PAGE_BASE)
#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
#define PAGE_SHARED_X \
__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
#define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
#define PAGE_KERNEL_RO __pgprot(_PAGE_BASE | _PAGE_SHARED)
#define PAGE_KERNEL_CI __pgprot(_PAGE_IO)
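The pgprot values above are plain ORs of the bit definitions (on MicroBlaze, _PAGE_SHARED, _PAGE_HASHPTE, and _PAGE_EXEC default to 0). A standalone check of the arithmetic:

#include <stdio.h>

#define _PAGE_PRESENT	0x002
#define _PAGE_RW	0x040
#define _PAGE_DIRTY	0x080
#define _PAGE_HWWRITE	0x100
#define _PAGE_HWEXEC	0x200
#define _PAGE_ACCESSED	0x400

int main(void)
{
	unsigned long base = _PAGE_PRESENT | _PAGE_ACCESSED;		/* 0x402 */
	unsigned long wren = _PAGE_RW | _PAGE_DIRTY | _PAGE_HWWRITE;	/* 0x1c0 */
	unsigned long kern = base | wren | _PAGE_HWEXEC;		/* 0x7c2 */

	printf("_PAGE_KERNEL = 0x%03lx\n", kern);
	return 0;
}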
/*
* We consider execute permission the same as read.
* Also, write permissions imply read permissions.
*/
#define __P000 PAGE_NONE
#define __P001 PAGE_READONLY_X
#define __P010 PAGE_COPY
#define __P011 PAGE_COPY_X
#define __P100 PAGE_READONLY
#define __P101 PAGE_READONLY_X
#define __P110 PAGE_COPY
#define __P111 PAGE_COPY_X
#define __S000 PAGE_NONE
#define __S001 PAGE_READONLY_X
#define __S010 PAGE_SHARED
#define __S011 PAGE_SHARED_X
#define __S100 PAGE_READONLY
#define __S101 PAGE_READONLY_X
#define __S110 PAGE_SHARED
#define __S111 PAGE_SHARED_X
#ifndef __ASSEMBLY__
/*
* ZERO_PAGE is a global shared page that is always zero: used
* for zero-mapped memory areas etc..
*/
extern unsigned long empty_zero_page[1024];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
#endif /* __ASSEMBLY__ */
#define pte_none(pte) ((pte_val(pte) & ~_PTE_NONE_MASK) == 0)
#define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT)
#define pte_clear(mm, addr, ptep) \
do { set_pte_at((mm), (addr), (ptep), __pte(0)); } while (0)
#define pmd_none(pmd) (!pmd_val(pmd))
#define pmd_bad(pmd) ((pmd_val(pmd) & _PMD_PRESENT) == 0)
#define pmd_present(pmd) ((pmd_val(pmd) & _PMD_PRESENT) != 0)
#define pmd_clear(pmdp) do { pmd_val(*(pmdp)) = 0; } while (0)
#define pte_page(x) (mem_map + (unsigned long) \
((pte_val(x) - memory_start) >> PAGE_SHIFT))
#define PFN_SHIFT_OFFSET (PAGE_SHIFT)
#define pte_pfn(x) (pte_val(x) >> PFN_SHIFT_OFFSET)
#define pfn_pte(pfn, prot) \
__pte(((pte_basic_t)(pfn) << PFN_SHIFT_OFFSET) | pgprot_val(prot))
#ifndef __ASSEMBLY__
/*
* The "pgd_xxx()" functions here are trivial for a folded two-level
* setup: the pgd is never bad, and a pmd always exists (as it's folded
* into the pgd entry)
*/
static inline int pgd_none(pgd_t pgd) { return 0; }
static inline int pgd_bad(pgd_t pgd) { return 0; }
static inline int pgd_present(pgd_t pgd) { return 1; }
#define pgd_clear(xp) do { } while (0)
#define pgd_page(pgd) \
((unsigned long) __va(pgd_val(pgd) & PAGE_MASK))
/*
* The following only work if pte_present() is true.
* Undefined behaviour if not..
*/
static inline int pte_read(pte_t pte) { return pte_val(pte) & _PAGE_USER; }
static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; }
static inline int pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_EXEC; }
static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
/* FIXME */
static inline int pte_file(pte_t pte) { return 0; }
static inline void pte_uncache(pte_t pte) { pte_val(pte) |= _PAGE_NO_CACHE; }
static inline void pte_cache(pte_t pte) { pte_val(pte) &= ~_PAGE_NO_CACHE; }
static inline pte_t pte_rdprotect(pte_t pte) \
{ pte_val(pte) &= ~_PAGE_USER; return pte; }
static inline pte_t pte_wrprotect(pte_t pte) \
{ pte_val(pte) &= ~(_PAGE_RW | _PAGE_HWWRITE); return pte; }
static inline pte_t pte_exprotect(pte_t pte) \
{ pte_val(pte) &= ~_PAGE_EXEC; return pte; }
static inline pte_t pte_mkclean(pte_t pte) \
{ pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_HWWRITE); return pte; }
static inline pte_t pte_mkold(pte_t pte) \
{ pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkread(pte_t pte) \
{ pte_val(pte) |= _PAGE_USER; return pte; }
static inline pte_t pte_mkexec(pte_t pte) \
{ pte_val(pte) |= _PAGE_USER | _PAGE_EXEC; return pte; }
static inline pte_t pte_mkwrite(pte_t pte) \
{ pte_val(pte) |= _PAGE_RW; return pte; }
static inline pte_t pte_mkdirty(pte_t pte) \
{ pte_val(pte) |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte) \
{ pte_val(pte) |= _PAGE_ACCESSED; return pte; }
/*
* Conversion functions: convert a page and protection to a page entry,
* and a page entry and page directory to the page they refer to.
*/
static inline pte_t mk_pte_phys(phys_addr_t physpage, pgprot_t pgprot)
{
pte_t pte;
pte_val(pte) = physpage | pgprot_val(pgprot);
return pte;
}
#define mk_pte(page, pgprot) \
({ \
pte_t pte; \
pte_val(pte) = (((page - mem_map) << PAGE_SHIFT) + memory_start) | \
pgprot_val(pgprot); \
pte; \
})
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
return pte;
}
/*
* Atomic PTE updates.
*
* pte_update clears and sets bit atomically, and returns
* the old pte value.
* The ((unsigned long)(p+1) - 4) hack is to get to the least-significant
* 32 bits of the PTE regardless of whether PTEs are 32 or 64 bits.
*/
static inline unsigned long pte_update(pte_t *p, unsigned long clr,
unsigned long set)
{
unsigned long old, tmp, msr;
__asm__ __volatile__("\
msrclr %2, 0x2\n\
nop\n\
lw %0, %4, r0\n\
andn %1, %0, %5\n\
or %1, %1, %6\n\
sw %1, %4, r0\n\
mts rmsr, %2\n\
nop"
: "=&r" (old), "=&r" (tmp), "=&r" (msr), "=m" (*p)
: "r" ((unsigned long)(p+1) - 4), "r" (clr), "r" (set), "m" (*p)
: "cc");
return old;
}
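Stripped of the msrclr/mts pair that masks interrupts around it, the asm above is this read-modify-write (a sketch only; the real routine must stay atomic with respect to exception handlers):

#include <stdio.h>

static unsigned long pte_update_sketch(unsigned long *p,
				       unsigned long clr, unsigned long set)
{
	unsigned long old = *p;		/* lw */

	*p = (old & ~clr) | set;	/* andn; or; sw */
	return old;
}

int main(void)
{
	unsigned long pte = 0x402;	/* _PAGE_PRESENT | _PAGE_ACCESSED */

	/* clear ACCESSED, as ptep_test_and_clear_young() does */
	unsigned long old = pte_update_sketch(&pte, 0x400, 0);

	printf("old 0x%lx new 0x%lx young=%d\n", old, pte, (old & 0x400) != 0);
	return 0;
}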
/*
* set_pte stores a linux PTE into the linux page table.
*/
static inline void set_pte(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte)
{
*ptep = pte;
}
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte)
{
*ptep = pte;
}
static inline int ptep_test_and_clear_young(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
return (pte_update(ptep, _PAGE_ACCESSED, 0) & _PAGE_ACCESSED) != 0;
}
static inline int ptep_test_and_clear_dirty(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
return (pte_update(ptep, \
(_PAGE_DIRTY | _PAGE_HWWRITE), 0) & _PAGE_DIRTY) != 0;
}
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
return __pte(pte_update(ptep, ~_PAGE_HASHPTE, 0));
}
/*static inline void ptep_set_wrprotect(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
pte_update(ptep, (_PAGE_RW | _PAGE_HWWRITE), 0);
}*/
static inline void ptep_mkdirty(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
pte_update(ptep, 0, _PAGE_DIRTY);
}
/*#define pte_same(A,B) (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)*/
/* Convert pmd entry to page */
/* our pmd entry is an effective address of pte table*/
/* returns effective address of the pmd entry*/
#define pmd_page_kernel(pmd) ((unsigned long) (pmd_val(pmd) & PAGE_MASK))
/* returns struct *page of the pmd entry*/
#define pmd_page(pmd) (pfn_to_page(__pa(pmd_val(pmd)) >> PAGE_SHIFT))
/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)
/* to find an entry in a page-table-directory */
#define pgd_index(address) ((address) >> PGDIR_SHIFT)
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
/* Find an entry in the second-level page table.. */
static inline pmd_t *pmd_offset(pgd_t *dir, unsigned long address)
{
return (pmd_t *) dir;
}
/* Find an entry in the third-level page table.. */
#define pte_index(address) \
(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, addr) \
((pte_t *) pmd_page_kernel(*(dir)) + pte_index(addr))
#define pte_offset_map(dir, addr) \
((pte_t *) kmap_atomic(pmd_page(*(dir)), KM_PTE0) + pte_index(addr))
#define pte_offset_map_nested(dir, addr) \
((pte_t *) kmap_atomic(pmd_page(*(dir)), KM_PTE1) + pte_index(addr))
#define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0)
#define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1)
/* Encode and decode a nonlinear file mapping entry */
#define PTE_FILE_MAX_BITS 29
#define pte_to_pgoff(pte) (pte_val(pte) >> 3)
#define pgoff_to_pte(off) ((pte_t) { ((off) << 3) })
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
/*
* When flushing the tlb entry for a page, we also need to flush the hash
* table entry. flush_hash_page is assembler (for speed) in hashtable.S.
*/
extern int flush_hash_page(unsigned context, unsigned long va, pte_t *ptep);
/* Add an HPTE to the hash table */
extern void add_hash_page(unsigned context, unsigned long va, pte_t *ptep);
/*
* Encode and decode a swap entry.
* Note that the bits we use in a PTE for representing a swap entry
* must not include the _PAGE_PRESENT bit, or the _PAGE_HASHPTE bit
* (if used). -- paulus
*/
#define __swp_type(entry) ((entry).val & 0x3f)
#define __swp_offset(entry) ((entry).val >> 6)
#define __swp_entry(type, offset) \
((swp_entry_t) { (type) | ((offset) << 6) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) >> 2 })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val << 2 })
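The encoding keeps the low two PTE bits clear, so a swap PTE can never look present (_PAGE_PRESENT is 0x002). A round-trip check of the arithmetic above:

#include <stdio.h>

int main(void)
{
	unsigned long type = 0x5, offset = 0x1234;
	unsigned long val = type | (offset << 6);	/* __swp_entry() */
	unsigned long pte = val << 2;			/* __swp_entry_to_pte() */

	printf("type=%lx offset=%lx present-bit=%lx\n",
	       (pte >> 2) & 0x3f, (pte >> 2) >> 6, pte & 0x2);
	return 0;
}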
/* CONFIG_APUS */
/* For virtual address to physical address conversion */
extern void cache_clear(__u32 addr, int length);
extern void cache_push(__u32 addr, int length);
extern int mm_end_of_chunk(unsigned long addr, int len);
extern unsigned long iopa(unsigned long addr);
/* extern unsigned long mm_ptov(unsigned long addr) \
__attribute__ ((const)); TBD */
/* Values for nocacheflag and cmode */
/* These are not used by the APUS kernel_map, but prevent
* compilation errors.
*/
#define IOMAP_FULL_CACHING 0
#define IOMAP_NOCACHE_SER 1
#define IOMAP_NOCACHE_NONSER 2
#define IOMAP_NO_COPYBACK 3
/*
* Map some physical address range into the kernel address space.
*/
extern unsigned long kernel_map(unsigned long paddr, unsigned long size,
int nocacheflag, unsigned long *memavailp);
/*
* Set cache mode of (kernel space) address range.
*/
extern void kernel_set_cachemode(unsigned long address, unsigned long size,
unsigned int cmode);
/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define kern_addr_valid(addr) (1)
#define io_remap_page_range remap_page_range
/*
* No page table caches to initialise
*/
#define pgtable_cache_init() do { } while (0)
void do_page_fault(struct pt_regs *regs, unsigned long address,
unsigned long error_code);
void __init io_block_mapping(unsigned long virt, phys_addr_t phys,
unsigned int size, int flags);
void __init adjust_total_lowmem(void);
void mapin_ram(void);
int map_page(unsigned long va, phys_addr_t pa, int flags);
extern int mem_init_done;
extern unsigned long ioremap_base;
extern unsigned long ioremap_bot;
asmlinkage void __init mmu_init(void);
void __init *early_get_page(void);
void *consistent_alloc(int gfp, size_t size, dma_addr_t *dma_handle);
void consistent_free(void *vaddr);
void consistent_sync(void *vaddr, size_t size, int direction);
void consistent_sync_page(struct page *page, unsigned long offset,
size_t size, int direction);
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* CONFIG_MMU */
#ifndef __ASSEMBLY__
#include <asm-generic/pgtable.h>
...
@@ -16,7 +16,7 @@
 */
typedef unsigned long	__kernel_ino_t;
typedef unsigned short	__kernel_mode_t;
typedef unsigned int	__kernel_nlink_t;
typedef long		__kernel_off_t;
typedef int		__kernel_pid_t;
...
/*
 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2008-2009 PetaLogix
 * Copyright (C) 2006 Atmark Techno, Inc.
 *
 * This file is subject to the terms and conditions of the GNU General Public

@@ -26,14 +26,15 @@ extern const struct seq_operations cpuinfo_op;
# define cpu_sleep()		do {} while (0)
# define prepare_to_copy(tsk)	do {} while (0)

#define task_pt_regs(tsk) \
		(((struct pt_regs *)(THREAD_SIZE + task_stack_page(tsk))) - 1)

/* Do necessary setup to start up a newly executed thread. */
void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long usp);

# endif /* __ASSEMBLY__ */
# ifndef CONFIG_MMU
/*
 * User space process size: memory size
 *
@@ -85,4 +86,90 @@ extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
# define KSTK_EIP(tsk)	(0)
# define KSTK_ESP(tsk)	(0)
# else /* CONFIG_MMU */
/*
* This is used to define STACK_TOP, and with MMU it must be below
* kernel base to select the correct PGD when handling MMU exceptions.
*/
# define TASK_SIZE (CONFIG_KERNEL_START)
/*
* This decides where the kernel will search for a free chunk of vm
* space during mmap's.
*/
# define TASK_UNMAPPED_BASE (TASK_SIZE / 8 * 3)
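With a common (board-configurable, so assumed here) CONFIG_KERNEL_START of 0xC0000000, TASK_SIZE / 8 * 3 puts the mmap search base at 0x48000000, leaving the lower 3/8 of user space to the exec image and heap:

#include <stdio.h>

int main(void)
{
	unsigned long task_size = 0xC0000000UL;		/* assumed TASK_SIZE */
	unsigned long unmapped_base = task_size / 8 * 3;

	printf("TASK_UNMAPPED_BASE = 0x%08lx\n", unmapped_base); /* 0x48000000 */
	return 0;
}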
# define THREAD_KSP 0
# ifndef __ASSEMBLY__
/*
* Default implementation of macro that returns current
* instruction pointer ("program counter").
*/
# define current_text_addr() ({ __label__ _l; _l: &&_l; })
/* If you change this, you must change the associated assembly-languages
* constants defined below, THREAD_*.
*/
struct thread_struct {
/* kernel stack pointer (must be first field in structure) */
unsigned long ksp;
unsigned long ksp_limit; /* if ksp <= ksp_limit stack overflow */
void *pgdir; /* root of page-table tree */
struct pt_regs *regs; /* Pointer to saved register state */
};
# define INIT_THREAD { \
.ksp = sizeof init_stack + (unsigned long)init_stack, \
.pgdir = swapper_pg_dir, \
}
/* Do necessary setup to start up a newly executed thread. */
void start_thread(struct pt_regs *regs,
unsigned long pc, unsigned long usp);
/* Free all resources held by a thread. */
extern inline void release_thread(struct task_struct *dead_task)
{
}
extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
/* Free current thread data structures etc. */
static inline void exit_thread(void)
{
}
/* Return saved (kernel) PC of a blocked thread. */
# define thread_saved_pc(tsk) \
((tsk)->thread.regs ? (tsk)->thread.regs->r15 : 0)
unsigned long get_wchan(struct task_struct *p);
/* The size allocated for kernel stacks. This _must_ be a power of two! */
# define KERNEL_STACK_SIZE 0x2000
/* Return some info about the user process TASK. */
# define task_tos(task) ((unsigned long)(task) + KERNEL_STACK_SIZE)
# define task_regs(task) ((struct pt_regs *)task_tos(task) - 1)
# define task_pt_regs_plus_args(tsk) \
(((void *)task_pt_regs(tsk)) - STATE_SAVE_ARG_SPACE)
# define task_sp(task) (task_regs(task)->r1)
# define task_pc(task) (task_regs(task)->pc)
/* Grotty old names for some. */
# define KSTK_EIP(task) (task_pc(task))
# define KSTK_ESP(task) (task_sp(task))
/* FIXME */
# define deactivate_mm(tsk, mm) do { } while (0)
# define STACK_TOP TASK_SIZE
# define STACK_TOP_MAX STACK_TOP
# endif /* __ASSEMBLY__ */
# endif /* CONFIG_MMU */
#endif /* _ASM_MICROBLAZE_PROCESSOR_H */
@@ -10,7 +10,6 @@
#define _ASM_MICROBLAZE_PTRACE_H

#ifndef __ASSEMBLY__
#include <linux/types.h>

typedef unsigned long microblaze_reg_t;
...
/*
 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2008-2009 PetaLogix
 * Copyright (C) 2006 Atmark Techno, Inc.
 *
 * This file is subject to the terms and conditions of the GNU General Public

@@ -30,4 +30,21 @@
#define FSR_UF		(1<<1) /* Underflow */
#define FSR_DO		(1<<0) /* Denormalized operand error */
# ifdef CONFIG_MMU
/* Machine State Register (MSR) Fields */
# define MSR_UM (1<<11) /* User Mode */
# define MSR_UMS (1<<12) /* User Mode Save */
# define MSR_VM (1<<13) /* Virtual Mode */
# define MSR_VMS (1<<14) /* Virtual Mode Save */
# define MSR_KERNEL (MSR_EE | MSR_VM)
/* # define MSR_USER (MSR_KERNEL | MSR_UM | MSR_IE) */
# define MSR_KERNEL_VMS (MSR_EE | MSR_VMS)
/* # define MSR_USER_VMS (MSR_KERNEL_VMS | MSR_UMS | MSR_IE) */
/* Exception State Register (ESR) Fields */
# define ESR_DIZ (1<<11) /* Zone Protection */
# define ESR_S (1<<10) /* Store instruction */
# endif /* CONFIG_MMU */
#endif /* _ASM_MICROBLAZE_REGISTERS_H */
/*
 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2008-2009 PetaLogix
 * Copyright (C) 2006 Atmark Techno, Inc.
 *
 * This file is subject to the terms and conditions of the GNU General Public

@@ -14,6 +16,7 @@
# ifndef __ASSEMBLY__
extern char _ssbss[], _esbss[];
extern unsigned long __ivt_start[], __ivt_end[];
extern char _etext[], _stext[];

# ifdef CONFIG_MTD_UCLINUX
extern char *_ebss;
...
/*
 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2008-2009 PetaLogix
 * Copyright (C) 2006 Atmark Techno, Inc.
 *
 * This file is subject to the terms and conditions of the GNU General Public

@@ -11,7 +11,7 @@
#ifndef _ASM_MICROBLAZE_SEGMENT_H
#define _ASM_MICROBLAZE_SEGMENT_H

# ifndef __ASSEMBLY__

typedef struct {
	unsigned long seg;

@@ -29,13 +29,19 @@ typedef struct {
 *
 * For a non-MMU arch like MicroBlaze, KERNEL_DS and USER_DS are equal.
 */
# define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

# ifndef CONFIG_MMU
# define KERNEL_DS	MAKE_MM_SEG(0)
# define USER_DS	KERNEL_DS
# else
# define KERNEL_DS	MAKE_MM_SEG(0xFFFFFFFF)
# define USER_DS	MAKE_MM_SEG(TASK_SIZE - 1)
# endif

# define get_ds()	(KERNEL_DS)
# define get_fs()	(current_thread_info()->addr_limit)
# define set_fs(val)	(current_thread_info()->addr_limit = (val))

# define segment_eq(a, b)	((a).seg == (b).seg)
...
/*
 * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2007-2009 PetaLogix
 * Copyright (C) 2006 Atmark Techno, Inc.
 *
 * This file is subject to the terms and conditions of the GNU General Public

@@ -18,7 +19,6 @@
extern unsigned int boot_cpuid; /* move to smp.h */

extern char cmd_line[COMMAND_LINE_SIZE];

void early_printk(const char *fmt, ...);

@@ -30,6 +30,11 @@ void setup_heartbeat(void);
unsigned long long sched_clock(void);

# ifdef CONFIG_MMU
extern void mmu_reset(void);
extern void early_console_reg_tlb_alloc(unsigned int addr);
# endif /* CONFIG_MMU */

void time_init(void);
void init_IRQ(void);
void machine_early_init(const char *cmdline, unsigned int ram,

@@ -40,5 +45,6 @@ void machine_shutdown(void);
void machine_halt(void);
void machine_power_off(void);

# endif/* __KERNEL__ */
# endif /* __ASSEMBLY__ */
#endif /* _ASM_MICROBLAZE_SETUP_H */
@@ -16,58 +16,53 @@
#include <linux/posix_types.h>

#define STAT_HAVE_NSEC 1

struct stat {
	unsigned long	st_dev;
	unsigned long	st_ino;
	unsigned int	st_mode;
	unsigned int	st_nlink;
	unsigned int	st_uid;
	unsigned int	st_gid;
	unsigned long	st_rdev;
	unsigned long	__pad1;
	long		st_size;
	int		st_blksize;
	int		__pad2;
	long		st_blocks;
	int		st_atime;
	unsigned int	st_atime_nsec;
	int		st_mtime;
	unsigned int	st_mtime_nsec;
	int		st_ctime;
	unsigned int	st_ctime_nsec;
	unsigned long	__unused4;
	unsigned long	__unused5;
};

struct stat64 {
	unsigned long long	st_dev;		/* Device. */
	unsigned long long	st_ino;		/* File serial number. */
	unsigned int		st_mode;	/* File mode. */
	unsigned int		st_nlink;	/* Link count. */
	unsigned int		st_uid;		/* User ID of the file's owner. */
	unsigned int		st_gid;		/* Group ID of the file's group. */
	unsigned long long	st_rdev;	/* Device number, if device. */
	unsigned long long	__pad1;
	long long		st_size;	/* Size of file, in bytes. */
	int			st_blksize;	/* Optimal block size for I/O. */
	int			__pad2;
	long long		st_blocks;	/* Number of 512-byte blocks allocated. */
	int			st_atime;	/* Time of last access. */
	unsigned int		st_atime_nsec;
	int			st_mtime;	/* Time of last modification. */
	unsigned int		st_mtime_nsec;
	int			st_ctime;	/* Time of last status change. */
	unsigned int		st_ctime_nsec;
	unsigned int		__unused4;
	unsigned int		__unused5;
};

#endif /* _ASM_MICROBLAZE_STAT_H */
@@ -9,7 +9,7 @@
#ifndef _ASM_MICROBLAZE_STRING_H
#define _ASM_MICROBLAZE_STRING_H

#ifdef __KERNEL__

#define __HAVE_ARCH_MEMSET
#define __HAVE_ARCH_MEMCPY
...
@@ -34,6 +34,9 @@ asmlinkage int sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize,
asmlinkage int sys_sigaction(int sig, const struct old_sigaction *act,
		struct old_sigaction *oact);

asmlinkage long sys_rt_sigaction(int sig, const struct sigaction __user *act,
		struct sigaction __user *oact, size_t sigsetsize);

asmlinkage int sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
		struct pt_regs *regs);
...
@@ -122,6 +122,8 @@ static inline struct thread_info *current_thread_info(void)
#define TIF_SINGLESTEP		4
#define TIF_IRET		5 /* return with iret */
#define TIF_MEMDIE		6
#define TIF_SYSCALL_AUDIT	9 /* syscall auditing active */
#define TIF_SECCOMP		10 /* secure computing */
#define TIF_FREEZE		14 /* Freezing for suspend */

/* FIXME change in entry.S */
@@ -138,10 +140,17 @@ static inline struct thread_info *current_thread_info(void)
#define _TIF_IRET		(1<<TIF_IRET)
#define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)
#define _TIF_FREEZE		(1<<TIF_FREEZE)
#define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
#define _TIF_SECCOMP		(1 << TIF_SECCOMP)
#define _TIF_KERNEL_TRACE	(1 << TIF_KERNEL_TRACE)

/* work to do in syscall trace */
#define _TIF_WORK_SYSCALL_MASK	(_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP | \
				 _TIF_SYSCALL_AUDIT | _TIF_SECCOMP)

/* work to do on interrupt/exception return */
#define _TIF_WORK_MASK		0x0000FFFE

/* work to do on any return to u-space */
#define _TIF_ALLWORK_MASK	0x0000FFFF

@@ -154,6 +163,17 @@ static inline struct thread_info *current_thread_info(void)
 */
/* FPU was used by this task this quantum (SMP) */
#define TS_USEDFPU		0x0001
#define TS_RESTORE_SIGMASK 0x0002
#ifndef __ASSEMBLY__
#define HAVE_SET_RESTORE_SIGMASK 1
static inline void set_restore_sigmask(void)
{
struct thread_info *ti = current_thread_info();
ti->status |= TS_RESTORE_SIGMASK;
set_bit(TIF_SIGPENDING, (unsigned long *)&ti->flags);
}
#endif
#endif /* __KERNEL__ */
#endif /* _ASM_MICROBLAZE_THREAD_INFO_H */
/*
 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2008-2009 PetaLogix
 * Copyright (C) 2006 Atmark Techno, Inc.
 *
 * This file is subject to the terms and conditions of the GNU General Public

@@ -13,4 +15,10 @@
#include <asm-generic/tlb.h>
#ifdef CONFIG_MMU
#define tlb_start_vma(tlb, vma) do { } while (0)
#define tlb_end_vma(tlb, vma) do { } while (0)
#define __tlb_remove_tlb_entry(tlb, pte, address) do { } while (0)
#endif
#endif /* _ASM_MICROBLAZE_TLB_H */
/*
 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2008-2009 PetaLogix
 * Copyright (C) 2006 Atmark Techno, Inc.
 *
 * This file is subject to the terms and conditions of the GNU General Public

@@ -9,6 +11,50 @@
#ifndef _ASM_MICROBLAZE_TLBFLUSH_H
#define _ASM_MICROBLAZE_TLBFLUSH_H
#ifdef CONFIG_MMU
#include <linux/sched.h>
#include <linux/threads.h>
#include <asm/processor.h> /* For TASK_SIZE */
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
extern void _tlbie(unsigned long address);
extern void _tlbia(void);
#define __tlbia() _tlbia()
static inline void local_flush_tlb_all(void)
{ __tlbia(); }
static inline void local_flush_tlb_mm(struct mm_struct *mm)
{ __tlbia(); }
static inline void local_flush_tlb_page(struct vm_area_struct *vma,
unsigned long vmaddr)
{ _tlbie(vmaddr); }
static inline void local_flush_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{ __tlbia(); }
#define flush_tlb_kernel_range(start, end) do { } while (0)
#define update_mmu_cache(vma, addr, pte) do { } while (0)
#define flush_tlb_all local_flush_tlb_all
#define flush_tlb_mm local_flush_tlb_mm
#define flush_tlb_page local_flush_tlb_page
#define flush_tlb_range local_flush_tlb_range
/*
* This is called in munmap when we have freed up some page-table
* pages. We don't need to do anything here, there's nothing special
* about our page-table pages. -- paulus
*/
static inline void flush_tlb_pgtables(struct mm_struct *mm,
unsigned long start, unsigned long end) { }
#else /* CONFIG_MMU */
#define flush_tlb()			BUG()
#define flush_tlb_all()			BUG()
#define flush_tlb_mm(mm)		BUG()

@@ -17,4 +63,6 @@
#define flush_tlb_pgtables(mm, start, end)	BUG()
#define flush_tlb_kernel_range(start, end)	BUG()
#endif /* CONFIG_MMU */
#endif /* _ASM_MICROBLAZE_TLBFLUSH_H */
/*
 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2008-2009 PetaLogix
 * Copyright (C) 2006 Atmark Techno, Inc.
 *
 * This file is subject to the terms and conditions of the GNU General Public

@@ -26,6 +28,10 @@
#define VERIFY_READ	0
#define VERIFY_WRITE	1
#define __clear_user(addr, n) (memset((void *)(addr), 0, (n)), 0)
#ifndef CONFIG_MMU
extern int ___range_ok(unsigned long addr, unsigned long size);

#define __range_ok(addr, size) \

@@ -34,13 +40,12 @@ extern int ___range_ok(unsigned long addr, unsigned long size);
#define access_ok(type, addr, size) (__range_ok((addr), (size)) == 0)
#define __access_ok(add, size) (__range_ok((addr), (size)) == 0)
/* Undefined function to trigger linker error */
extern int bad_user_access_length(void);
/* FIXME this is a function for optimization -> memcpy */
#define __get_user(var, ptr)				\
({							\
	int __gu_err = 0;				\
	switch (sizeof(*(ptr))) {			\
	case 1:						\

@@ -57,12 +62,13 @@ extern inline int bad_user_access_length(void)
		break;					\
	}						\
	__gu_err;					\
})

#define __get_user_bad()	(bad_user_access_length(), (-EFAULT))
/* FIXME: __pu_val is not defined there */
#define __put_user(var, ptr)				\
({							\
	int __pu_err = 0;				\
	switch (sizeof(*(ptr))) {			\
	case 1:						\

@@ -71,8 +77,8 @@ extern inline int bad_user_access_length(void)
		*(ptr) = (var);				\
		break;					\
	case 8: {					\
		typeof(*(ptr)) __pu_val = (var);	\
		memcpy(ptr, &__pu_val, sizeof(__pu_val)); \
		}					\
		break;					\
	default:					\

@@ -80,22 +86,22 @@ extern inline int bad_user_access_length(void)
		break;					\
	}						\
	__pu_err;					\
})

#define __put_user_bad()	(bad_user_access_length(), (-EFAULT))

#define put_user(x, ptr)	__put_user((x), (ptr))
#define get_user(x, ptr)	__get_user((x), (ptr))
#define copy_to_user(to, from, n)	(memcpy((to), (from), (n)), 0)
#define copy_from_user(to, from, n)	(memcpy((to), (from), (n)), 0)

#define __copy_to_user(to, from, n)	(copy_to_user((to), (from), (n)))
#define __copy_from_user(to, from, n)	(copy_from_user((to), (from), (n)))
#define __copy_to_user_inatomic(to, from, n) \
			(__copy_to_user((to), (from), (n)))
#define __copy_from_user_inatomic(to, from, n) \
			(__copy_from_user((to), (from), (n)))
static inline unsigned long clear_user(void *addr, unsigned long size)
{

@@ -107,10 +113,197 @@ static inline unsigned long clear_user(void *addr, unsigned long size)
/* Returns 0 if exception not found and fixup otherwise. */
extern unsigned long search_exception_table(unsigned long);
extern long strncpy_from_user(char *dst, const char *src, long count);
extern long strnlen_user(const char *src, long count);
#else /* CONFIG_MMU */
/*
* Address is valid if:
* - "addr", "addr + size" and "size" are all below the limit
*/
#define access_ok(type, addr, size) \
(get_fs().seg > (((unsigned long)(addr)) | \
(size) | ((unsigned long)(addr) + (size))))
/* || printk("access_ok failed for %s at 0x%08lx (size %d), seg 0x%08x\n",
type?"WRITE":"READ",addr,size,get_fs().seg)) */
/*
 * All the __XXX versions of the macros/functions below do not perform
 * access checking. It is assumed that the necessary checks have already
 * been performed before the function (macro) is called.
 */
#define get_user(x, ptr) \
({ \
access_ok(VERIFY_READ, (ptr), sizeof(*(ptr))) \
? __get_user((x), (ptr)) : -EFAULT; \
})
#define put_user(x, ptr) \
({ \
access_ok(VERIFY_WRITE, (ptr), sizeof(*(ptr))) \
? __put_user((x), (ptr)) : -EFAULT; \
})
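
For illustration, this is how the checked variants are meant to be consumed by
ordinary kernel code; the function below is a hypothetical example, not part of
this patch:

    /* hypothetical caller: returns 0 or -EFAULT, per the macros above */
    static int bump_user_counter(int __user *uptr)
    {
        int val;

        if (get_user(val, uptr))        /* access_ok + __get_user */
            return -EFAULT;
        return put_user(val + 1, uptr); /* access_ok + __put_user */
    }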
#define __get_user(x, ptr) \
({ \
unsigned long __gu_val; \
/*unsigned long __gu_ptr = (unsigned long)(ptr);*/ \
long __gu_err; \
switch (sizeof(*(ptr))) { \
case 1: \
__get_user_asm("lbu", (ptr), __gu_val, __gu_err); \
break; \
case 2: \
__get_user_asm("lhu", (ptr), __gu_val, __gu_err); \
break; \
case 4: \
__get_user_asm("lw", (ptr), __gu_val, __gu_err); \
break; \
default: \
__gu_val = 0; __gu_err = -EINVAL; \
} \
x = (__typeof__(*(ptr))) __gu_val; \
__gu_err; \
})
#define __get_user_asm(insn, __gu_ptr, __gu_val, __gu_err) \
({ \
__asm__ __volatile__ ( \
"1:" insn " %1, %2, r0; \
addk %0, r0, r0; \
2: \
.section .fixup,\"ax\"; \
3: brid 2b; \
addik %0, r0, %3; \
.previous; \
.section __ex_table,\"a\"; \
.word 1b,3b; \
.previous;" \
: "=r"(__gu_err), "=r"(__gu_val) \
: "r"(__gu_ptr), "i"(-EFAULT) \
); \
})
#define __put_user(x, ptr) \
({ \
__typeof__(*(ptr)) __gu_val = x; \
long __gu_err = 0; \
switch (sizeof(__gu_val)) { \
case 1: \
__put_user_asm("sb", (ptr), __gu_val, __gu_err); \
break; \
case 2: \
__put_user_asm("sh", (ptr), __gu_val, __gu_err); \
break; \
case 4: \
__put_user_asm("sw", (ptr), __gu_val, __gu_err); \
break; \
case 8: \
__put_user_asm_8((ptr), __gu_val, __gu_err); \
break; \
default: \
__gu_err = -EINVAL; \
} \
__gu_err; \
})
#define __put_user_asm_8(__gu_ptr, __gu_val, __gu_err) \
({ \
__asm__ __volatile__ (" lwi %0, %1, 0; \
1: swi %0, %2, 0; \
lwi %0, %1, 4; \
2: swi %0, %2, 4; \
addk %0,r0,r0; \
3: \
.section .fixup,\"ax\"; \
4: brid 3b; \
addik %0, r0, %3; \
.previous; \
.section __ex_table,\"a\"; \
.word 1b,4b,2b,4b; \
.previous;" \
: "=&r"(__gu_err) \
: "r"(&__gu_val), \
"r"(__gu_ptr), "i"(-EFAULT) \
); \
})
#define __put_user_asm(insn, __gu_ptr, __gu_val, __gu_err) \
({ \
__asm__ __volatile__ ( \
"1:" insn " %1, %2, r0; \
addk %0, r0, r0; \
2: \
.section .fixup,\"ax\"; \
3: brid 2b; \
addik %0, r0, %3; \
.previous; \
.section __ex_table,\"a\"; \
.word 1b,3b; \
.previous;" \
: "=r"(__gu_err) \
: "r"(__gu_val), "r"(__gu_ptr), "i"(-EFAULT) \
); \
})
/*
 * Return: number of bytes not copied, i.e. 0 if OK or non-zero on failure.
*/
static inline int clear_user(char *to, int size)
{
if (size && access_ok(VERIFY_WRITE, to, size)) {
__asm__ __volatile__ (" \
1: \
sb r0, %2, r0; \
addik %0, %0, -1; \
bneid %0, 1b; \
addik %2, %2, 1; \
2: \
.section __ex_table,\"a\"; \
.word 1b,2b; \
.section .text;" \
: "=r"(size) \
: "0"(size), "r"(to)
);
}
return size;
}
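
Callers treat the "bytes not cleared" return value as a fault indicator; a hedged
usage sketch (not from this patch):

    /* illustrative only */
    static int zero_user_area(char *ubuf, int len)
    {
        if (clear_user(ubuf, len))      /* non-zero: bytes left uncleared */
            return -EFAULT;
        return 0;
    }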
extern unsigned long __copy_tofrom_user(void __user *to,
const void __user *from, unsigned long size);
#define copy_to_user(to, from, n) \
(access_ok(VERIFY_WRITE, (to), (n)) ? \
__copy_tofrom_user((void __user *)(to), \
(__force const void __user *)(from), (n)) \
: -EFAULT)
#define __copy_to_user(to, from, n) copy_to_user((to), (from), (n))
#define __copy_to_user_inatomic(to, from, n) copy_to_user((to), (from), (n))
#define copy_from_user(to, from, n) \
(access_ok(VERIFY_READ, (from), (n)) ? \
__copy_tofrom_user((__force void __user *)(to), \
(void __user *)(from), (n)) \
: -EFAULT)
#define __copy_from_user(to, from, n) copy_from_user((to), (from), (n))
#define __copy_from_user_inatomic(to, from, n) \
copy_from_user((to), (from), (n))
extern int __strncpy_user(char *to, const char __user *from, int len);
extern int __strnlen_user(const char __user *sstr, int len);
#define strncpy_from_user(to, from, len) \
(access_ok(VERIFY_READ, from, 1) ? \
__strncpy_user(to, from, len) : -EFAULT)
#define strnlen_user(str, len) \
(access_ok(VERIFY_READ, str, 1) ? __strnlen_user(str, len) : 0)
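
Note the asymmetric error protocol: strncpy_from_user() reports -EFAULT while
strnlen_user() reports 0 for a bad pointer, and both only probe the first byte with
access_ok, leaving the rest to the __ex_table fixups inside the helpers. A
hypothetical caller (the function name and buffer size are ours):

    /* illustrative: fetch a user string into a fixed kernel buffer */
    static long fetch_name(char *dst, const char __user *src)
    {
        if (strnlen_user(src, 32) == 0)         /* 0 means bad pointer */
            return -EFAULT;
        return strncpy_from_user(dst, src, 32); /* >= 0 or -EFAULT */
    }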
#endif /* CONFIG_MMU */

extern long strncpy_from_user(char *dst, const char __user *src, long count);
extern long strnlen_user(const char __user *src, long count);
extern long __strncpy_from_user(char *dst, const char __user *src, long count);
/*
 * The exception table consists of pairs of addresses: the first is the
......
@@ -12,7 +12,8 @@
# ifdef __KERNEL__

# include <linux/unaligned/be_struct.h>
# include <linux/unaligned/le_byteshift.h>
# include <linux/unaligned/generic.h>

# define get_unaligned __get_unaligned_be
......
@@ -15,5 +15,6 @@ obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
obj-$(CONFIG_SELFMOD) += selfmod.o
obj-$(CONFIG_HEART_BEAT) += heartbeat.o
obj-$(CONFIG_MODULES) += microblaze_ksyms.o module.o
obj-$(CONFIG_MMU) += misc.o
obj-y += entry$(MMUEXT).o
/*
 * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2007-2009 PetaLogix
 * Copyright (C) 2006 Atmark Techno, Inc.
 *
@@ -68,16 +69,26 @@ int main(int argc, char *argv[])
    /* struct task_struct */
    DEFINE(TS_THREAD_INFO, offsetof(struct task_struct, stack));
#ifdef CONFIG_MMU
DEFINE(TASK_STATE, offsetof(struct task_struct, state));
DEFINE(TASK_FLAGS, offsetof(struct task_struct, flags));
DEFINE(TASK_PTRACE, offsetof(struct task_struct, ptrace));
DEFINE(TASK_BLOCKED, offsetof(struct task_struct, blocked));
DEFINE(TASK_MM, offsetof(struct task_struct, mm));
DEFINE(TASK_ACTIVE_MM, offsetof(struct task_struct, active_mm));
DEFINE(TASK_PID, offsetof(struct task_struct, pid));
DEFINE(TASK_THREAD, offsetof(struct task_struct, thread));
DEFINE(THREAD_KSP, offsetof(struct thread_struct, ksp));
BLANK();
DEFINE(PGDIR, offsetof(struct thread_struct, pgdir));
BLANK();
#endif
    /* struct thread_info */
    DEFINE(TI_TASK, offsetof(struct thread_info, task));
    DEFINE(TI_EXEC_DOMAIN, offsetof(struct thread_info, exec_domain));
    DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
    DEFINE(TI_STATUS, offsetof(struct thread_info, status));
    DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
    DEFINE(TI_PRE_COUNT, offsetof(struct thread_info, preempt_count));
    DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit));
    DEFINE(TI_RESTART_BLOCK, offsetof(struct thread_info, restart_block));
    DEFINE(TI_CPU_CONTEXT, offsetof(struct thread_info, cpu_context));
    BLANK();
......
@@ -87,6 +87,9 @@ int __init setup_early_printk(char *opt)
    base_addr = early_uartlite_console();
    if (base_addr) {
        early_console_initialized = 1;
#ifdef CONFIG_MMU
early_console_reg_tlb_alloc(base_addr);
#endif
early_printk("early_printk_console is enabled at 0x%08x\n", early_printk("early_printk_console is enabled at 0x%08x\n",
base_addr); base_addr);
......
@@ -10,7 +10,7 @@
#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <linux/errno.h>
#include <asm/entry.h>
#include <asm/asm-offsets.h>
#include <asm/registers.h>
......
/*
* Low-level system-call handling, trap handlers and context-switching
*
* Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
* Copyright (C) 2008-2009 PetaLogix
* Copyright (C) 2003 John Williams <jwilliams@itee.uq.edu.au>
* Copyright (C) 2001,2002 NEC Corporation
* Copyright (C) 2001,2002 Miles Bader <miles@gnu.org>
*
* This file is subject to the terms and conditions of the GNU General
* Public License. See the file COPYING in the main directory of this
* archive for more details.
*
* Written by Miles Bader <miles@gnu.org>
* Heavily modified by John Williams for Microblaze
*/
#include <linux/sys.h>
#include <linux/linkage.h>
#include <asm/entry.h>
#include <asm/current.h>
#include <asm/processor.h>
#include <asm/exceptions.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/page.h>
#include <asm/unistd.h>
#include <linux/errno.h>
#include <asm/signal.h>
/* The size of a state save frame. */
#define STATE_SAVE_SIZE (PT_SIZE + STATE_SAVE_ARG_SPACE)
/* The offset of the struct pt_regs in a `state save frame' on the stack. */
#define PTO STATE_SAVE_ARG_SPACE /* 24: the space for args */
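
Taking the comment at face value (STATE_SAVE_ARG_SPACE == 24 is an assumption read
off the comment, not verified here), a freshly opened save frame is laid out like
this relative to the new r1:

    r1 + 0   .. r1 + PTO - 1             argument space (24 bytes)
    r1 + PTO .. r1 + PTO + PT_SIZE - 1   struct pt_regs
    r1 + STATE_SAVE_SIZE                 start of the interrupted frame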
#define C_ENTRY(name) .globl name; .align 4; name
/*
* Various ways of setting and clearing BIP in flags reg.
 * This is mucky, but necessary on MicroBlaze versions that
 * allow msr ops to write to BIP
*/
#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
.macro clear_bip
msrclr r11, MSR_BIP
nop
.endm
.macro set_bip
msrset r11, MSR_BIP
nop
.endm
.macro clear_eip
msrclr r11, MSR_EIP
nop
.endm
.macro set_ee
msrset r11, MSR_EE
nop
.endm
.macro disable_irq
msrclr r11, MSR_IE
nop
.endm
.macro enable_irq
msrset r11, MSR_IE
nop
.endm
.macro set_ums
msrset r11, MSR_UMS
nop
msrclr r11, MSR_VMS
nop
.endm
.macro set_vms
msrclr r11, MSR_UMS
nop
msrset r11, MSR_VMS
nop
.endm
.macro clear_vms_ums
msrclr r11, MSR_VMS
nop
msrclr r11, MSR_UMS
nop
.endm
#else
.macro clear_bip
mfs r11, rmsr
nop
andi r11, r11, ~MSR_BIP
mts rmsr, r11
nop
.endm
.macro set_bip
mfs r11, rmsr
nop
ori r11, r11, MSR_BIP
mts rmsr, r11
nop
.endm
.macro clear_eip
mfs r11, rmsr
nop
andi r11, r11, ~MSR_EIP
mts rmsr, r11
nop
.endm
.macro set_ee
mfs r11, rmsr
nop
ori r11, r11, MSR_EE
mts rmsr, r11
nop
.endm
.macro disable_irq
mfs r11, rmsr
nop
andi r11, r11, ~MSR_IE
mts rmsr, r11
nop
.endm
.macro enable_irq
mfs r11, rmsr
nop
ori r11, r11, MSR_IE
mts rmsr, r11
nop
.endm
.macro set_ums
mfs r11, rmsr
nop
ori r11, r11, MSR_VMS
andni r11, r11, MSR_UMS
mts rmsr, r11
nop
.endm
.macro set_vms
mfs r11, rmsr
nop
ori r11, r11, MSR_VMS
andni r11, r11, MSR_UMS
mts rmsr, r11
nop
.endm
.macro clear_vms_ums
mfs r11, rmsr
nop
andni r11, r11, (MSR_VMS|MSR_UMS)
mts rmsr,r11
nop
.endm
#endif
/* Define how to call high-level functions. With MMU, virtual mode must be
* enabled when calling the high-level function. Clobbers R11.
* VM_ON, VM_OFF, DO_JUMP_BIPCLR, DO_CALL
*/
/* turn on virtual protected mode save */
#define VM_ON \
set_ums; \
rted r0, 2f; \
2: nop;
/* turn off virtual protected mode save and user mode save */
#define VM_OFF \
clear_vms_ums; \
rted r0, TOPHYS(1f); \
1: nop;
#define SAVE_REGS \
swi r2, r1, PTO+PT_R2; /* Save SDA */ \
swi r5, r1, PTO+PT_R5; \
swi r6, r1, PTO+PT_R6; \
swi r7, r1, PTO+PT_R7; \
swi r8, r1, PTO+PT_R8; \
swi r9, r1, PTO+PT_R9; \
swi r10, r1, PTO+PT_R10; \
swi r11, r1, PTO+PT_R11; /* save clobbered regs after rval */\
swi r12, r1, PTO+PT_R12; \
swi r13, r1, PTO+PT_R13; /* Save SDA2 */ \
swi r14, r1, PTO+PT_PC; /* PC, before IRQ/trap */ \
swi r15, r1, PTO+PT_R15; /* Save LP */ \
swi r18, r1, PTO+PT_R18; /* Save asm scratch reg */ \
swi r19, r1, PTO+PT_R19; \
swi r20, r1, PTO+PT_R20; \
swi r21, r1, PTO+PT_R21; \
swi r22, r1, PTO+PT_R22; \
swi r23, r1, PTO+PT_R23; \
swi r24, r1, PTO+PT_R24; \
swi r25, r1, PTO+PT_R25; \
swi r26, r1, PTO+PT_R26; \
swi r27, r1, PTO+PT_R27; \
swi r28, r1, PTO+PT_R28; \
swi r29, r1, PTO+PT_R29; \
swi r30, r1, PTO+PT_R30; \
swi r31, r1, PTO+PT_R31; /* Save current task reg */ \
mfs r11, rmsr; /* save MSR */ \
nop; \
swi r11, r1, PTO+PT_MSR;
#define RESTORE_REGS \
lwi r11, r1, PTO+PT_MSR; \
mts rmsr , r11; \
nop; \
lwi r2, r1, PTO+PT_R2; /* restore SDA */ \
lwi r5, r1, PTO+PT_R5; \
lwi r6, r1, PTO+PT_R6; \
lwi r7, r1, PTO+PT_R7; \
lwi r8, r1, PTO+PT_R8; \
lwi r9, r1, PTO+PT_R9; \
lwi r10, r1, PTO+PT_R10; \
lwi r11, r1, PTO+PT_R11; /* restore clobbered regs after rval */\
lwi r12, r1, PTO+PT_R12; \
lwi r13, r1, PTO+PT_R13; /* restore SDA2 */ \
lwi r14, r1, PTO+PT_PC; /* RESTORE_LINK PC, before IRQ/trap */\
lwi r15, r1, PTO+PT_R15; /* restore LP */ \
lwi r18, r1, PTO+PT_R18; /* restore asm scratch reg */ \
lwi r19, r1, PTO+PT_R19; \
lwi r20, r1, PTO+PT_R20; \
lwi r21, r1, PTO+PT_R21; \
lwi r22, r1, PTO+PT_R22; \
lwi r23, r1, PTO+PT_R23; \
lwi r24, r1, PTO+PT_R24; \
lwi r25, r1, PTO+PT_R25; \
lwi r26, r1, PTO+PT_R26; \
lwi r27, r1, PTO+PT_R27; \
lwi r28, r1, PTO+PT_R28; \
lwi r29, r1, PTO+PT_R29; \
lwi r30, r1, PTO+PT_R30; \
lwi r31, r1, PTO+PT_R31; /* Restore cur task reg */
.text
/*
* User trap.
*
* System calls are handled here.
*
* Syscall protocol:
* Syscall number in r12, args in r5-r10
* Return value in r3
*
* Trap entered via brki instruction, so BIP bit is set, and interrupts
* are masked. This is nice, means we don't have to CLI before state save
*/
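
A user-space stub honoring this protocol could look like the sketch below (C with
GCC extended asm; the trap vector 0x08 matches the _user_exception entry in the
vector table later in this file, while the syscall number 4 for write is an
assumption about the microblaze unistd table):

    /* hypothetical user-space syscall stub */
    static inline long my_write(long fd, const void *buf, long len)
    {
        register long nr  __asm__("r12") = 4;        /* assumed __NR_write */
        register long a1  __asm__("r5")  = fd;       /* args in r5-r10 */
        register long a2  __asm__("r6")  = (long)buf;
        register long a3  __asm__("r7")  = len;
        register long ret __asm__("r3");             /* return value in r3 */

        __asm__ __volatile__("brki r14, 0x08"        /* trap to _user_exception */
                : "=r" (ret)
                : "r" (nr), "r" (a1), "r" (a2), "r" (a3)
                : "r14", "memory");
        return ret;
    }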
C_ENTRY(_user_exception):
swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
    addi r14, r14, 4 /* return address is 4 bytes after call */
swi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* Save r11 */
lwi r11, r0, TOPHYS(PER_CPU(KM));/* See if already in kernel mode.*/
beqi r11, 1f; /* Jump ahead if coming from user */
/* Kernel-mode state save. */
lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/
tophys(r1,r11);
swi r11, r1, (PT_R1-PT_SIZE); /* Save original SP. */
lwi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */
addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
SAVE_REGS
addi r11, r0, 1; /* Was in kernel-mode. */
swi r11, r1, PTO+PT_MODE; /* pt_regs -> kernel mode */
brid 2f;
nop; /* Fill delay slot */
/* User-mode state save. */
1:
lwi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */
lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
tophys(r1,r1);
lwi r1, r1, TS_THREAD_INFO; /* get stack from task_struct */
/* calculate kernel stack pointer from task struct 8k */
addik r1, r1, THREAD_SIZE;
tophys(r1,r1);
addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
SAVE_REGS
swi r0, r1, PTO+PT_MODE; /* Was in user-mode. */
lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
swi r11, r1, PTO+PT_R1; /* Store user SP. */
addi r11, r0, 1;
swi r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode. */
2: lwi r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
/* Save away the syscall number. */
swi r12, r1, PTO+PT_R0;
tovirt(r1,r1)
la r15, r0, ret_from_trap-8
    /* where the trap should return; need -8 to adjust for rtsd r15, 8 */
/* Jump to the appropriate function for the system call number in r12
* (r12 is not preserved), or return an error if r12 is not valid. The LP
* register should point to the location where
* the called function should return. [note that MAKE_SYS_CALL uses label 1] */
/* See if the system call number is valid. */
addi r11, r12, -__NR_syscalls;
bgei r11,1f;
/* Figure out which function to use for this system call. */
/* Note Microblaze barrel shift is optional, so don't rely on it */
add r12, r12, r12; /* convert num -> ptr */
add r12, r12, r12;
    /* Trace syscalls and store them to r0_ram */
lwi r3, r12, 0x400 + TOPHYS(r0_ram)
addi r3, r3, 1
swi r3, r12, 0x400 + TOPHYS(r0_ram)
lwi r12, r12, TOPHYS(sys_call_table); /* Function ptr */
/* Make the system call. to r12*/
set_vms;
rtid r12, 0;
nop;
/* The syscall number is invalid, return an error. */
1: VM_ON; /* RETURN() expects virtual mode*/
addi r3, r0, -ENOSYS;
rtsd r15,8; /* looks like a normal subroutine return */
or r0, r0, r0
/* Entry point used to return from a syscall/trap. */
/* We re-enable BIP bit before state restore */
C_ENTRY(ret_from_trap):
set_bip; /* Ints masked for state restore*/
lwi r11, r1, PTO+PT_MODE;
/* See if returning to kernel mode, if so, skip resched &c. */
bnei r11, 2f;
/* We're returning to user mode, so check for various conditions that
* trigger rescheduling. */
/* Get current task ptr into r11 */
add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
lwi r11, r11, TS_THREAD_INFO; /* get thread info */
lwi r11, r11, TI_FLAGS; /* get flags in thread info */
andi r11, r11, _TIF_NEED_RESCHED;
beqi r11, 5f;
swi r3, r1, PTO + PT_R3; /* store syscall result */
swi r4, r1, PTO + PT_R4;
bralid r15, schedule; /* Call scheduler */
nop; /* delay slot */
lwi r3, r1, PTO + PT_R3; /* restore syscall result */
lwi r4, r1, PTO + PT_R4;
/* Maybe handle a signal */
5: add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
lwi r11, r11, TS_THREAD_INFO; /* get thread info */
lwi r11, r11, TI_FLAGS; /* get flags in thread info */
andi r11, r11, _TIF_SIGPENDING;
    beqi r11, 1f; /* No signals pending - skip handling */
swi r3, r1, PTO + PT_R3; /* store syscall result */
swi r4, r1, PTO + PT_R4;
la r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
add r6, r0, r0; /* Arg 2: sigset_t *oldset */
addi r7, r0, 1; /* Arg 3: int in_syscall */
bralid r15, do_signal; /* Handle any signals */
nop;
lwi r3, r1, PTO + PT_R3; /* restore syscall result */
lwi r4, r1, PTO + PT_R4;
/* Finally, return to user state. */
1: swi r0, r0, PER_CPU(KM); /* Now officially in user state. */
add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
swi r11, r0, PER_CPU(CURRENT_SAVE); /* save current */
VM_OFF;
tophys(r1,r1);
RESTORE_REGS;
addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
lwi r1, r1, PT_R1 - PT_SIZE;/* Restore user stack pointer. */
bri 6f;
/* Return to kernel state. */
2: VM_OFF;
tophys(r1,r1);
RESTORE_REGS;
addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
tovirt(r1,r1);
6:
TRAP_return: /* Make global symbol for debugging */
rtbd r14, 0; /* Instructions to return from an IRQ */
nop;
/* These syscalls need access to the struct pt_regs on the stack, so we
implement them in assembly (they're basically all wrappers anyway). */
C_ENTRY(sys_fork_wrapper):
addi r5, r0, SIGCHLD /* Arg 0: flags */
lwi r6, r1, PTO+PT_R1 /* Arg 1: child SP (use parent's) */
la r7, r1, PTO /* Arg 2: parent context */
    add r8, r0, r0 /* Arg 3: (unused) */
add r9, r0, r0; /* Arg 4: (unused) */
add r10, r0, r0; /* Arg 5: (unused) */
brid do_fork /* Do real work (tail-call) */
nop;
/* This is the initial entry point for a new child thread, with an appropriate
   stack in place that makes it look like the child is in the middle of a
   syscall. This function is actually `returned to' from switch_thread
   (copy_thread makes ret_from_fork the return address in each new thread's
   saved context). */
C_ENTRY(ret_from_fork):
bralid r15, schedule_tail; /* ...which is schedule_tail's arg */
add r3, r5, r0; /* switch_thread returns the prev task */
/* ( in the delay slot ) */
add r3, r0, r0; /* Child's fork call should return 0. */
brid ret_from_trap; /* Do normal trap return */
nop;
C_ENTRY(sys_vfork_wrapper):
la r5, r1, PTO
brid sys_vfork /* Do real work (tail-call) */
nop
C_ENTRY(sys_clone_wrapper):
bnei r6, 1f; /* See if child SP arg (arg 1) is 0. */
    lwi r6, r1, PTO+PT_R1; /* If so, use parent's stack ptr */
1: la r7, r1, PTO; /* Arg 2: parent context */
add r8, r0, r0; /* Arg 3: (unused) */
add r9, r0, r0; /* Arg 4: (unused) */
add r10, r0, r0; /* Arg 5: (unused) */
brid do_fork /* Do real work (tail-call) */
nop;
C_ENTRY(sys_execve_wrapper):
la r8, r1, PTO; /* add user context as 4th arg */
brid sys_execve; /* Do real work (tail-call).*/
nop;
C_ENTRY(sys_sigsuspend_wrapper):
    swi r3, r1, PTO+PT_R3; /* save r3, r4 registers */
swi r4, r1, PTO+PT_R4;
la r6, r1, PTO; /* add user context as 2nd arg */
bralid r15, sys_sigsuspend; /* Do real work.*/
nop;
lwi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
lwi r4, r1, PTO+PT_R4;
bri ret_from_trap /* fall through will not work here due to align */
nop;
C_ENTRY(sys_rt_sigsuspend_wrapper):
    swi r3, r1, PTO+PT_R3; /* save r3, r4 registers */
swi r4, r1, PTO+PT_R4;
la r7, r1, PTO; /* add user context as 3rd arg */
brlid r15, sys_rt_sigsuspend; /* Do real work.*/
nop;
lwi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
lwi r4, r1, PTO+PT_R4;
bri ret_from_trap /* fall through will not work here due to align */
nop;
C_ENTRY(sys_sigreturn_wrapper):
    swi r3, r1, PTO+PT_R3; /* save r3, r4 registers */
swi r4, r1, PTO+PT_R4;
la r5, r1, PTO; /* add user context as 1st arg */
brlid r15, sys_sigreturn; /* Do real work.*/
nop;
lwi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
lwi r4, r1, PTO+PT_R4;
bri ret_from_trap /* fall through will not work here due to align */
nop;
C_ENTRY(sys_rt_sigreturn_wrapper):
    swi r3, r1, PTO+PT_R3; /* save r3, r4 registers */
swi r4, r1, PTO+PT_R4;
la r5, r1, PTO; /* add user context as 1st arg */
brlid r15, sys_rt_sigreturn /* Do real work */
nop;
lwi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
lwi r4, r1, PTO+PT_R4;
bri ret_from_trap /* fall through will not work here due to align */
nop;
/*
 * HW EXCEPTION routine start
*/
#define SAVE_STATE \
swi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* Save r11 */ \
set_bip; /*equalize initial state for all possible entries*/\
clear_eip; \
enable_irq; \
set_ee; \
/* See if already in kernel mode.*/ \
lwi r11, r0, TOPHYS(PER_CPU(KM)); \
beqi r11, 1f; /* Jump ahead if coming from user */\
/* Kernel-mode state save. */ \
/* Reload kernel stack-ptr. */ \
lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); \
tophys(r1,r11); \
swi r11, r1, (PT_R1-PT_SIZE); /* Save original SP. */ \
lwi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */\
addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */\
    /* store return registers separately because \
     * this macro is used for other exceptions */ \
swi r3, r1, PTO + PT_R3; \
swi r4, r1, PTO + PT_R4; \
SAVE_REGS \
/* PC, before IRQ/trap - this is one instruction above */ \
swi r17, r1, PTO+PT_PC; \
\
addi r11, r0, 1; /* Was in kernel-mode. */ \
swi r11, r1, PTO+PT_MODE; \
brid 2f; \
nop; /* Fill delay slot */ \
1: /* User-mode state save. */ \
lwi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */\
lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */\
tophys(r1,r1); \
lwi r1, r1, TS_THREAD_INFO; /* get the thread info */ \
addik r1, r1, THREAD_SIZE; /* calculate kernel stack pointer */\
tophys(r1,r1); \
\
addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */\
    /* store return registers separately because this macro \
     * is used for other exceptions */ \
swi r3, r1, PTO + PT_R3; \
swi r4, r1, PTO + PT_R4; \
SAVE_REGS \
/* PC, before IRQ/trap - this is one instruction above FIXME*/ \
swi r17, r1, PTO+PT_PC; \
\
swi r0, r1, PTO+PT_MODE; /* Was in user-mode. */ \
lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); \
swi r11, r1, PTO+PT_R1; /* Store user SP. */ \
addi r11, r0, 1; \
swi r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode.*/\
2: lwi r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */\
/* Save away the syscall number. */ \
swi r0, r1, PTO+PT_R0; \
tovirt(r1,r1)
C_ENTRY(full_exception_trap):
swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
/* adjust exception address for privileged instruction
     * so we can find where it is */
addik r17, r17, -4
SAVE_STATE /* Save registers */
    /* FIXME this can be stored directly in the PT_ESR reg.
     * I tested it but there is a fault */
    /* where the trap should return; need -8 to adjust for rtsd r15, 8 */
la r15, r0, ret_from_exc - 8
la r5, r1, PTO /* parameter struct pt_regs * regs */
mfs r6, resr
nop
mfs r7, rfsr; /* save FSR */
nop
la r12, r0, full_exception
set_vms;
rtbd r12, 0;
nop;
/*
* Unaligned data trap.
*
 * An unaligned data access that falls on the last word of a 4k page is handled here.
*
* Trap entered via exception, so EE bit is set, and interrupts
* are masked. This is nice, means we don't have to CLI before state save
*
* The assembler routine is in "arch/microblaze/kernel/hw_exception_handler.S"
*/
C_ENTRY(unaligned_data_trap):
swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
SAVE_STATE /* Save registers.*/
    /* where the trap should return; need -8 to adjust for rtsd r15, 8 */
la r15, r0, ret_from_exc-8
mfs r3, resr /* ESR */
nop
mfs r4, rear /* EAR */
nop
la r7, r1, PTO /* parameter struct pt_regs * regs */
la r12, r0, _unaligned_data_exception
set_vms;
rtbd r12, 0; /* interrupts enabled */
nop;
/*
* Page fault traps.
*
* If the real exception handler (from hw_exception_handler.S) didn't find
 * the mapping for the process, then we're thrown here to handle the situation.
*
* Trap entered via exceptions, so EE bit is set, and interrupts
* are masked. This is nice, means we don't have to CLI before state save
*
* Build a standard exception frame for TLB Access errors. All TLB exceptions
* will bail out to this point if they can't resolve the lightweight TLB fault.
*
* The C function called is in "arch/microblaze/mm/fault.c", declared as:
* void do_page_fault(struct pt_regs *regs,
* unsigned long address,
* unsigned long error_code)
*/
/* data and instruction trap - which one it is gets resolved in fault.c */
C_ENTRY(page_fault_data_trap):
swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
SAVE_STATE /* Save registers.*/
    /* where the trap should return; need -8 to adjust for rtsd r15, 8 */
la r15, r0, ret_from_exc-8
la r5, r1, PTO /* parameter struct pt_regs * regs */
mfs r6, rear /* parameter unsigned long address */
nop
mfs r7, resr /* parameter unsigned long error_code */
nop
la r12, r0, do_page_fault
set_vms;
rtbd r12, 0; /* interrupts enabled */
nop;
C_ENTRY(page_fault_instr_trap):
swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
SAVE_STATE /* Save registers.*/
    /* where the trap should return; need -8 to adjust for rtsd r15, 8 */
la r15, r0, ret_from_exc-8
la r5, r1, PTO /* parameter struct pt_regs * regs */
mfs r6, rear /* parameter unsigned long address */
nop
ori r7, r0, 0 /* parameter unsigned long error_code */
la r12, r0, do_page_fault
set_vms;
rtbd r12, 0; /* interrupts enabled */
nop;
/* Entry point used to return from an exception. */
C_ENTRY(ret_from_exc):
set_bip; /* Ints masked for state restore*/
lwi r11, r1, PTO+PT_MODE;
bnei r11, 2f; /* See if returning to kernel mode, */
/* ... if so, skip resched &c. */
/* We're returning to user mode, so check for various conditions that
trigger rescheduling. */
/* Get current task ptr into r11 */
add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
lwi r11, r11, TS_THREAD_INFO; /* get thread info */
lwi r11, r11, TI_FLAGS; /* get flags in thread info */
andi r11, r11, _TIF_NEED_RESCHED;
beqi r11, 5f;
/* Call the scheduler before returning from a syscall/trap. */
bralid r15, schedule; /* Call scheduler */
nop; /* delay slot */
/* Maybe handle a signal */
5: add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
lwi r11, r11, TS_THREAD_INFO; /* get thread info */
lwi r11, r11, TI_FLAGS; /* get flags in thread info */
andi r11, r11, _TIF_SIGPENDING;
    beqi r11, 1f; /* No signals pending - skip handling */
/*
* Handle a signal return; Pending signals should be in r18.
*
* Not all registers are saved by the normal trap/interrupt entry
* points (for instance, call-saved registers (because the normal
* C-compiler calling sequence in the kernel makes sure they're
* preserved), and call-clobbered registers in the case of
* traps), but signal handlers may want to examine or change the
* complete register state. Here we save anything not saved by
* the normal entry sequence, so that it may be safely restored
* (in a possibly modified form) after do_signal returns.
 * store return registers separately because this macro is used
 * for other exceptions */
swi r3, r1, PTO + PT_R3;
swi r4, r1, PTO + PT_R4;
la r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
add r6, r0, r0; /* Arg 2: sigset_t *oldset */
addi r7, r0, 0; /* Arg 3: int in_syscall */
bralid r15, do_signal; /* Handle any signals */
nop;
lwi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
lwi r4, r1, PTO+PT_R4;
/* Finally, return to user state. */
1: swi r0, r0, PER_CPU(KM); /* Now officially in user state. */
add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
swi r11, r0, PER_CPU(CURRENT_SAVE); /* save current */
VM_OFF;
tophys(r1,r1);
lwi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
lwi r4, r1, PTO+PT_R4;
RESTORE_REGS;
addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
lwi r1, r1, PT_R1 - PT_SIZE; /* Restore user stack pointer. */
bri 6f;
/* Return to kernel state. */
2: VM_OFF;
tophys(r1,r1);
lwi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
lwi r4, r1, PTO+PT_R4;
RESTORE_REGS;
addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
tovirt(r1,r1);
6:
EXC_return: /* Make global symbol for debugging */
rtbd r14, 0; /* Instructions to return from an IRQ */
nop;
/*
 * HW EXCEPTION routine end
*/
/*
* Hardware maskable interrupts.
*
* The stack-pointer (r1) should have already been saved to the memory
* location PER_CPU(ENTRY_SP).
*/
C_ENTRY(_interrupt):
/* MS: we are in physical address */
/* Save registers, switch to proper stack, convert SP to virtual.*/
swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
swi r11, r0, TOPHYS(PER_CPU(R11_SAVE));
/* MS: See if already in kernel mode. */
lwi r11, r0, TOPHYS(PER_CPU(KM));
beqi r11, 1f; /* MS: Jump ahead if coming from user */
/* Kernel-mode state save. */
or r11, r1, r0
tophys(r1,r11); /* MS: I have in r1 physical address where stack is */
/* MS: Save original SP - position PT_R1 to next stack frame 4 *1 - 152*/
swi r11, r1, (PT_R1 - PT_SIZE);
/* MS: restore r11 because of saving in SAVE_REGS */
lwi r11, r0, TOPHYS(PER_CPU(R11_SAVE));
/* save registers */
/* MS: Make room on the stack -> activation record */
addik r1, r1, -STATE_SAVE_SIZE;
    /* MS: store return registers separately because
     * this macro is used for other exceptions */
swi r3, r1, PTO + PT_R3;
swi r4, r1, PTO + PT_R4;
SAVE_REGS
/* MS: store mode */
addi r11, r0, 1; /* MS: Was in kernel-mode. */
swi r11, r1, PTO + PT_MODE; /* MS: and save it */
brid 2f;
nop; /* MS: Fill delay slot */
1:
/* User-mode state save. */
/* MS: restore r11 -> FIXME move before SAVE_REG */
lwi r11, r0, TOPHYS(PER_CPU(R11_SAVE));
/* MS: get the saved current */
lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
tophys(r1,r1);
lwi r1, r1, TS_THREAD_INFO;
addik r1, r1, THREAD_SIZE;
tophys(r1,r1);
/* save registers */
addik r1, r1, -STATE_SAVE_SIZE;
swi r3, r1, PTO+PT_R3;
swi r4, r1, PTO+PT_R4;
SAVE_REGS
/* calculate mode */
swi r0, r1, PTO + PT_MODE;
lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
swi r11, r1, PTO+PT_R1;
/* setup kernel mode to KM */
addi r11, r0, 1;
swi r11, r0, TOPHYS(PER_CPU(KM));
2:
lwi r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
swi r0, r1, PTO + PT_R0;
tovirt(r1,r1)
la r5, r1, PTO;
set_vms;
la r11, r0, do_IRQ;
la r15, r0, irq_call;
irq_call:rtbd r11, 0;
nop;
/* MS: we are in virtual mode */
ret_from_irq:
lwi r11, r1, PTO + PT_MODE;
bnei r11, 2f;
add r11, r0, CURRENT_TASK;
lwi r11, r11, TS_THREAD_INFO;
lwi r11, r11, TI_FLAGS; /* MS: get flags from thread info */
andi r11, r11, _TIF_NEED_RESCHED;
beqi r11, 5f
bralid r15, schedule;
nop; /* delay slot */
/* Maybe handle a signal */
5: add r11, r0, CURRENT_TASK;
lwi r11, r11, TS_THREAD_INFO; /* MS: get thread info */
lwi r11, r11, TI_FLAGS; /* get flags in thread info */
andi r11, r11, _TIF_SIGPENDING;
beqid r11, no_intr_resched
/* Handle a signal return; Pending signals should be in r18. */
addi r7, r0, 0; /* Arg 3: int in_syscall */
la r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
bralid r15, do_signal; /* Handle any signals */
add r6, r0, r0; /* Arg 2: sigset_t *oldset */
/* Finally, return to user state. */
no_intr_resched:
/* Disable interrupts, we are now committed to the state restore */
disable_irq
swi r0, r0, PER_CPU(KM); /* MS: Now officially in user state. */
add r11, r0, CURRENT_TASK;
swi r11, r0, PER_CPU(CURRENT_SAVE);
VM_OFF;
tophys(r1,r1);
lwi r3, r1, PTO + PT_R3; /* MS: restore saved r3, r4 registers */
lwi r4, r1, PTO + PT_R4;
RESTORE_REGS
addik r1, r1, STATE_SAVE_SIZE /* MS: Clean up stack space. */
lwi r1, r1, PT_R1 - PT_SIZE;
bri 6f;
/* MS: Return to kernel state. */
2: VM_OFF /* MS: turn off MMU */
tophys(r1,r1)
lwi r3, r1, PTO + PT_R3; /* MS: restore saved r3, r4 registers */
lwi r4, r1, PTO + PT_R4;
RESTORE_REGS
addik r1, r1, STATE_SAVE_SIZE /* MS: Clean up stack space. */
tovirt(r1,r1);
6:
IRQ_return: /* MS: Make global symbol for debugging */
rtid r14, 0
nop
/*
* `Debug' trap
* We enter dbtrap in "BIP" (breakpoint) mode.
* So we exit the breakpoint mode with an 'rtbd' and proceed with the
 * original dbtrap; however, wait to save state first.
*/
C_ENTRY(_debug_exception):
/* BIP bit is set on entry, no interrupts can occur */
swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
swi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* Save r11 */
set_bip; /*equalize initial state for all possible entries*/
clear_eip;
enable_irq;
lwi r11, r0, TOPHYS(PER_CPU(KM));/* See if already in kernel mode.*/
beqi r11, 1f; /* Jump ahead if coming from user */
/* Kernel-mode state save. */
lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/
tophys(r1,r11);
swi r11, r1, (PT_R1-PT_SIZE); /* Save original SP. */
lwi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */
addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
swi r3, r1, PTO + PT_R3;
swi r4, r1, PTO + PT_R4;
SAVE_REGS;
addi r11, r0, 1; /* Was in kernel-mode. */
swi r11, r1, PTO + PT_MODE;
brid 2f;
nop; /* Fill delay slot */
1: /* User-mode state save. */
lwi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */
lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
tophys(r1,r1);
lwi r1, r1, TS_THREAD_INFO; /* get the thread info */
addik r1, r1, THREAD_SIZE; /* calculate kernel stack pointer */
tophys(r1,r1);
addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
swi r3, r1, PTO + PT_R3;
swi r4, r1, PTO + PT_R4;
SAVE_REGS;
swi r0, r1, PTO+PT_MODE; /* Was in user-mode. */
lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
swi r11, r1, PTO+PT_R1; /* Store user SP. */
addi r11, r0, 1;
swi r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode. */
2: lwi r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
/* Save away the syscall number. */
swi r0, r1, PTO+PT_R0;
tovirt(r1,r1)
addi r5, r0, SIGTRAP /* send the trap signal */
add r6, r0, CURRENT_TASK; /* Get current task ptr into r11 */
addk r7, r0, r0 /* 3rd param zero */
set_vms;
la r11, r0, send_sig;
la r15, r0, dbtrap_call;
dbtrap_call: rtbd r11, 0;
nop;
set_bip; /* Ints masked for state restore*/
lwi r11, r1, PTO+PT_MODE;
bnei r11, 2f;
/* Get current task ptr into r11 */
add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
lwi r11, r11, TS_THREAD_INFO; /* get thread info */
lwi r11, r11, TI_FLAGS; /* get flags in thread info */
andi r11, r11, _TIF_NEED_RESCHED;
beqi r11, 5f;
/* Call the scheduler before returning from a syscall/trap. */
bralid r15, schedule; /* Call scheduler */
nop; /* delay slot */
/* XXX Is PT_DTRACE handling needed here? */
/* XXX m68knommu also checks TASK_STATE & TASK_COUNTER here. */
/* Maybe handle a signal */
5: add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
lwi r11, r11, TS_THREAD_INFO; /* get thread info */
lwi r11, r11, TI_FLAGS; /* get flags in thread info */
andi r11, r11, _TIF_SIGPENDING;
    beqi r11, 1f; /* No signals pending - skip handling */
/* Handle a signal return; Pending signals should be in r18. */
/* Not all registers are saved by the normal trap/interrupt entry
points (for instance, call-saved registers (because the normal
C-compiler calling sequence in the kernel makes sure they're
preserved), and call-clobbered registers in the case of
traps), but signal handlers may want to examine or change the
complete register state. Here we save anything not saved by
the normal entry sequence, so that it may be safely restored
(in a possibly modified form) after do_signal returns. */
la r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
add r6, r0, r0; /* Arg 2: sigset_t *oldset */
addi r7, r0, 0; /* Arg 3: int in_syscall */
bralid r15, do_signal; /* Handle any signals */
nop;
/* Finally, return to user state. */
1: swi r0, r0, PER_CPU(KM); /* Now officially in user state. */
add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
swi r11, r0, PER_CPU(CURRENT_SAVE); /* save current */
VM_OFF;
tophys(r1,r1);
lwi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
lwi r4, r1, PTO+PT_R4;
RESTORE_REGS
addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
lwi r1, r1, PT_R1 - PT_SIZE;
/* Restore user stack pointer. */
bri 6f;
/* Return to kernel state. */
2: VM_OFF;
tophys(r1,r1);
lwi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
lwi r4, r1, PTO+PT_R4;
RESTORE_REGS
addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
tovirt(r1,r1);
6:
DBTRAP_return: /* Make global symbol for debugging */
rtbd r14, 0; /* Instructions to return from an IRQ */
nop;
ENTRY(_switch_to)
/* prepare return value */
addk r3, r0, r31
/* save registers in cpu_context */
/* use r11 and r12, volatile registers, as temp register */
/* give start of cpu_context for previous process */
addik r11, r5, TI_CPU_CONTEXT
swi r1, r11, CC_R1
swi r2, r11, CC_R2
/* skip volatile registers.
* they are saved on stack when we jumped to _switch_to() */
/* dedicated registers */
swi r13, r11, CC_R13
swi r14, r11, CC_R14
swi r15, r11, CC_R15
swi r16, r11, CC_R16
swi r17, r11, CC_R17
swi r18, r11, CC_R18
/* save non-volatile registers */
swi r19, r11, CC_R19
swi r20, r11, CC_R20
swi r21, r11, CC_R21
swi r22, r11, CC_R22
swi r23, r11, CC_R23
swi r24, r11, CC_R24
swi r25, r11, CC_R25
swi r26, r11, CC_R26
swi r27, r11, CC_R27
swi r28, r11, CC_R28
swi r29, r11, CC_R29
swi r30, r11, CC_R30
/* special purpose registers */
mfs r12, rmsr
nop
swi r12, r11, CC_MSR
mfs r12, rear
nop
swi r12, r11, CC_EAR
mfs r12, resr
nop
swi r12, r11, CC_ESR
mfs r12, rfsr
nop
swi r12, r11, CC_FSR
/* update r31, the current */
    lwi r31, r6, TI_TASK /* get pointer to the task which will run next */
    /* store it to current_save too */
swi r31, r0, PER_CPU(CURRENT_SAVE)
/* get new process' cpu context and restore */
    /* get the start of the next task's cpu context */
addik r11, r6, TI_CPU_CONTEXT
/* non-volatile registers */
lwi r30, r11, CC_R30
lwi r29, r11, CC_R29
lwi r28, r11, CC_R28
lwi r27, r11, CC_R27
lwi r26, r11, CC_R26
lwi r25, r11, CC_R25
lwi r24, r11, CC_R24
lwi r23, r11, CC_R23
lwi r22, r11, CC_R22
lwi r21, r11, CC_R21
lwi r20, r11, CC_R20
lwi r19, r11, CC_R19
/* dedicated registers */
lwi r18, r11, CC_R18
lwi r17, r11, CC_R17
lwi r16, r11, CC_R16
lwi r15, r11, CC_R15
lwi r14, r11, CC_R14
lwi r13, r11, CC_R13
/* skip volatile registers */
lwi r2, r11, CC_R2
lwi r1, r11, CC_R1
/* special purpose registers */
lwi r12, r11, CC_FSR
mts rfsr, r12
nop
lwi r12, r11, CC_MSR
mts rmsr, r12
nop
rtsd r15, 8
nop
ENTRY(_reset)
brai 0x70; /* Jump back to FS-boot */
ENTRY(_break)
mfs r5, rmsr
nop
swi r5, r0, 0x250 + TOPHYS(r0_ram)
mfs r5, resr
nop
swi r5, r0, 0x254 + TOPHYS(r0_ram)
bri 0
/* These are compiled and loaded into high memory, then
* copied into place in mach_early_setup */
.section .init.ivt, "ax"
.org 0x0
/* this is very important - here is the reset vector */
    /* in the current MMU branch you don't care what is here - it is
     * used from the bootloader side - but this is correct for FS-BOOT */
brai 0x70
nop
brai TOPHYS(_user_exception); /* syscall handler */
brai TOPHYS(_interrupt); /* Interrupt handler */
brai TOPHYS(_break); /* nmi trap handler */
brai TOPHYS(_hw_exception_handler); /* HW exception handler */
.org 0x60
brai TOPHYS(_debug_exception); /* debug trap handler*/
.section .rodata,"a"
#include "syscall_table.S"
syscall_table_size=(.-sys_call_table)
@@ -21,9 +21,9 @@
#include <asm/exceptions.h>
#include <asm/entry.h> /* For KM CPU var */
#include <linux/uaccess.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <asm/current.h>

#define MICROBLAZE_ILL_OPCODE_EXCEPTION 0x02
@@ -31,7 +31,7 @@
#define MICROBLAZE_DBUS_EXCEPTION 0x04
#define MICROBLAZE_DIV_ZERO_EXCEPTION 0x05
#define MICROBLAZE_FPU_EXCEPTION 0x06
#define MICROBLAZE_PRIVILEGED_EXCEPTION 0x07

static DEFINE_SPINLOCK(die_lock);
@@ -66,6 +66,11 @@ void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
asmlinkage void full_exception(struct pt_regs *regs, unsigned int type,
            int fsr, int addr)
{
#ifdef CONFIG_MMU
int code;
addr = regs->pc;
#endif
#if 0
    printk(KERN_WARNING "Exception %02x in %s mode, FSR=%08x PC=%08x ESR=%08x\n",
            type, user_mode(regs) ? "user" : "kernel", fsr,
@@ -74,7 +79,13 @@ asmlinkage void full_exception(struct pt_regs *regs, unsigned int type,
    switch (type & 0x1F) {
    case MICROBLAZE_ILL_OPCODE_EXCEPTION:
if (user_mode(regs)) {
printk(KERN_WARNING "Illegal opcode exception in user mode.\n");
            _exception(SIGILL, regs, ILL_ILLOPC, addr);
return;
}
printk(KERN_WARNING "Illegal opcode exception in kernel mode.\n");
die("opcode exception", regs, SIGBUS);
        break;
    case MICROBLAZE_IBUS_EXCEPTION:
        if (user_mode(regs)) {
@@ -95,11 +106,16 @@ asmlinkage void full_exception(struct pt_regs *regs, unsigned int type,
        die("bus exception", regs, SIGBUS);
        break;
    case MICROBLAZE_DIV_ZERO_EXCEPTION:
        if (user_mode(regs)) {
            printk(KERN_WARNING "Divide by zero exception in user mode\n");
            _exception(SIGILL, regs, ILL_ILLOPC, addr);
return;
}
printk(KERN_WARNING "Divide by zero exception in kernel mode.\n");
die("Divide by exception", regs, SIGBUS);
        break;
    case MICROBLAZE_FPU_EXCEPTION:
        printk(KERN_WARNING "FPU exception\n");
        /* IEEE FP exception */
        /* I removed the fsr variable and use the code var for storing fsr */
        if (fsr & FSR_IO)
@@ -115,7 +131,20 @@ asmlinkage void full_exception(struct pt_regs *regs, unsigned int type,
        _exception(SIGFPE, regs, fsr, addr);
        break;
#ifdef CONFIG_MMU
case MICROBLAZE_PRIVILEGED_EXCEPTION:
printk(KERN_WARNING "Privileged exception\n");
/* "brk r0,r0" - used as debug breakpoint */
if (get_user(code, (unsigned long *)regs->pc) == 0
&& code == 0x980c0000) {
_exception(SIGTRAP, regs, TRAP_BRKPT, addr);
} else {
_exception(SIGILL, regs, ILL_PRVOPC, addr);
}
break;
#endif
    default:
        /* FIXME what to do in an unexpected exception */
        printk(KERN_WARNING "Unexpected exception %02x "
            "PC=%08x in %s mode\n", type, (unsigned int) addr,
            kernel_mode(regs) ? "kernel" : "user");
......
@@ -3,6 +3,26 @@
 * Copyright (C) 2007-2009 PetaLogix
 * Copyright (C) 2006 Atmark Techno, Inc.
 *
* MMU code derived from arch/ppc/kernel/head_4xx.S:
* Copyright (c) 1995-1996 Gary Thomas <gdt@linuxppc.org>
* Initial PowerPC version.
* Copyright (c) 1996 Cort Dougan <cort@cs.nmt.edu>
* Rewritten for PReP
* Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au>
 *      Low-level exception handlers, MMU support, and rewrite.
* Copyright (c) 1997 Dan Malek <dmalek@jlc.net>
* PowerPC 8xx modifications.
* Copyright (c) 1998-1999 TiVo, Inc.
* PowerPC 403GCX modifications.
* Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu>
* PowerPC 403GCX/405GP modifications.
* Copyright 2000 MontaVista Software Inc.
* PPC405 modifications
* PowerPC 403GCX/405GP modifications.
* Author: MontaVista Software, Inc.
* frank_rowand@mvista.com or source@mvista.com
* debbie_chu@mvista.com
*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
@@ -12,6 +32,22 @@
#include <asm/thread_info.h>
#include <asm/page.h>
#ifdef CONFIG_MMU
#include <asm/setup.h> /* COMMAND_LINE_SIZE */
#include <asm/mmu.h>
#include <asm/processor.h>
.data
.global empty_zero_page
.align 12
empty_zero_page:
.space 4096
.global swapper_pg_dir
swapper_pg_dir:
.space 4096
#endif /* CONFIG_MMU */
    .text
ENTRY(_start)
    mfs r1, rmsr
@@ -32,6 +68,123 @@ _copy_fdt:
    addik r3, r3, -4 /* decrement loop */
no_fdt_arg:
#ifdef CONFIG_MMU
#ifndef CONFIG_CMDLINE_BOOL
/*
* handling command line
* copy command line to __init_end. There is space for storing command line.
*/
    or r6, r0, r0 /* increment */
ori r4, r0, __init_end /* load address of command line */
tophys(r4,r4) /* convert to phys address */
ori r3, r0, COMMAND_LINE_SIZE - 1 /* number of loops */
_copy_command_line:
lbu r7, r5, r6 /* r7=r5+r6 - r5 contain pointer to command line */
sb r7, r4, r6 /* addr[r4+r6]= r7*/
addik r6, r6, 1 /* increment counting */
bgtid r3, _copy_command_line /* loop for all entries */
    addik r3, r3, -1 /* decrement loop */
addik r5, r4, 0 /* add new space for command line */
tovirt(r5,r5)
#endif /* CONFIG_CMDLINE_BOOL */
#ifdef NOT_COMPILE
/* save bram context */
    or r6, r0, r0 /* increment */
ori r4, r0, TOPHYS(_bram_load_start) /* save bram context */
ori r3, r0, (LMB_SIZE - 4)
_copy_bram:
lw r7, r0, r6 /* r7 = r0 + r6 */
sw r7, r4, r6 /* addr[r4 + r6] = r7*/
addik r6, r6, 4 /* increment counting */
bgtid r3, _copy_bram /* loop for all entries */
    addik r3, r3, -4 /* decrement loop */
#endif
/* We have to turn on the MMU right away. */
/*
* Set up the initial MMU state so we can do the first level of
* kernel initialization. This maps the first 16 MBytes of memory 1:1
* virtual to physical.
*/
nop
addik r3, r0, 63 /* Invalidate all TLB entries */
_invalidate:
mts rtlbx, r3
mts rtlbhi, r0 /* flush: ensure V is clear */
bgtid r3, _invalidate /* loop for all entries */
addik r3, r3, -1
/* sync */
/*
* We should still be executing code at physical address area
* RAM_BASEADDR at this point. However, kernel code is at
* a virtual address. So, set up a TLB mapping to cover this once
* translation is enabled.
*/
addik r3,r0, CONFIG_KERNEL_START /* Load the kernel virtual address */
tophys(r4,r3) /* Load the kernel physical address */
mts rpid,r0 /* Load the kernel PID */
nop
bri 4
/*
* Configure and load two entries into TLB slots 0 and 1.
 * In case we are pinning TLBs, these are reserved by the
 * other TLB functions. If not reserving, then it doesn't
* matter where they are loaded.
*/
andi r4,r4,0xfffffc00 /* Mask off the real page number */
ori r4,r4,(TLB_WR | TLB_EX) /* Set the write and execute bits */
andi r3,r3,0xfffffc00 /* Mask off the effective page number */
ori r3,r3,(TLB_VALID | TLB_PAGESZ(PAGESZ_16M))
    mts rtlbx,r0 /* TLB slot 0 */
mts rtlblo,r4 /* Load the data portion of the entry */
mts rtlbhi,r3 /* Load the tag portion of the entry */
addik r4, r4, 0x01000000 /* Map next 16 M entries */
addik r3, r3, 0x01000000
ori r6,r0,1 /* TLB slot 1 */
mts rtlbx,r6
mts rtlblo,r4 /* Load the data portion of the entry */
mts rtlbhi,r3 /* Load the tag portion of the entry */
/*
* Load a TLB entry for LMB, since we need access to
* the exception vectors, using a 4k real==virtual mapping.
*/
ori r6,r0,3 /* TLB slot 3 */
mts rtlbx,r6
ori r4,r0,(TLB_WR | TLB_EX)
ori r3,r0,(TLB_VALID | TLB_PAGESZ(PAGESZ_4K))
mts rtlblo,r4 /* Load the data portion of the entry */
mts rtlbhi,r3 /* Load the tag portion of the entry */
/*
* We now have the lower 16 Meg of RAM mapped into TLB entries, and the
* caches ready to work.
*/
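/*
 * As a worked example (assuming CONFIG_KERNEL_START = 0xc0000000 and RAM at
 * physical 0x40000000; both are board/configuration dependent), the two pinned
 * entries built above would end up as:
 *
 *   slot 0: rtlbhi = 0xc0000000 | TLB_VALID | TLB_PAGESZ(PAGESZ_16M)
 *           rtlblo = 0x40000000 | TLB_WR | TLB_EX
 *   slot 1: rtlbhi = 0xc1000000 | TLB_VALID | TLB_PAGESZ(PAGESZ_16M)
 *           rtlblo = 0x41000000 | TLB_WR | TLB_EX
 */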
turn_on_mmu:
ori r15,r0,start_here
ori r4,r0,MSR_KERNEL_VMS
mts rmsr,r4
nop
rted r15,0 /* enables MMU */
nop
start_here:
#endif /* CONFIG_MMU */
    /* Initialize small data anchors */
    la r13, r0, _KERNEL_SDA_BASE_
    la r2, r0, _KERNEL_SDA2_BASE_
@@ -51,6 +204,43 @@ no_fdt_arg:
    brald r15, r8
    nop
#ifndef CONFIG_MMU
    la r15, r0, machine_halt
    braid start_kernel
    nop
#else
/*
* Initialize the MMU.
*/
bralid r15, mmu_init
nop
/* Go back to running unmapped so we can load up new values
* and change to using our exception vectors.
 * On the MicroBlaze, we just invalidate the used TLB entries to clear
* the old 16M byte TLB mappings.
*/
ori r15,r0,TOPHYS(kernel_load_context)
ori r4,r0,MSR_KERNEL
mts rmsr,r4
nop
bri 4
rted r15,0
nop
/* Load up the kernel context */
kernel_load_context:
# Keep entry 0 and 1 valid. Entry 3 mapped to LMB can go away.
ori r5,r0,3
mts rtlbx,r5
nop
mts rtlbhi,r0
nop
addi r15, r0, machine_halt
ori r17, r0, start_kernel
ori r4, r0, MSR_KERNEL_VMS
mts rmsr, r4
nop
rted r17, 0 /* enable MMU and jump to start_kernel */
nop
#endif /* CONFIG_MMU */
@@ -53,6 +53,12 @@
 *  - Illegal instruction opcode
 *  - Divide-by-zero
 *
* - Privileged instruction exception (MMU)
* - Data storage exception (MMU)
* - Instruction storage exception (MMU)
* - Data TLB miss exception (MMU)
* - Instruction TLB miss exception (MMU)
*
 * Note we disable interrupts during exception handling, otherwise we will
 * possibly get multiple re-entrancy if interrupt handlers themselves cause
 * exceptions. JW
@@ -71,9 +77,24 @@
#include <asm/asm-offsets.h>

/* Helpful Macros */
#ifndef CONFIG_MMU
#define EX_HANDLER_STACK_SIZ (4*19)
#endif
#define NUM_TO_REG(num) r ## num
#ifdef CONFIG_MMU
/* FIXME you can't change the first load of MSR because there is
 * a hardcoded jump (bri 4) */
#define RESTORE_STATE \
lwi r3, r1, PT_R3; \
lwi r4, r1, PT_R4; \
lwi r5, r1, PT_R5; \
lwi r6, r1, PT_R6; \
lwi r11, r1, PT_R11; \
lwi r31, r1, PT_R31; \
lwi r1, r0, TOPHYS(r0_ram + 0);
#endif /* CONFIG_MMU */
#define LWREG_NOP \
    bri ex_handler_unhandled; \
    nop;
@@ -106,6 +127,54 @@
    or r3, r0, NUM_TO_REG (regnum); \
    bri ex_sw_tail;
#ifdef CONFIG_MMU
#define R3_TO_LWREG_VM_V(regnum) \
brid ex_lw_end_vm; \
swi r3, r7, 4 * regnum;
#define R3_TO_LWREG_VM(regnum) \
brid ex_lw_end_vm; \
or NUM_TO_REG (regnum), r0, r3;
#define SWREG_TO_R3_VM_V(regnum) \
brid ex_sw_tail_vm; \
lwi r3, r7, 4 * regnum;
#define SWREG_TO_R3_VM(regnum) \
brid ex_sw_tail_vm; \
or r3, r0, NUM_TO_REG (regnum);
/* Shift right instruction depending on available configuration */
#if CONFIG_XILINX_MICROBLAZE0_USE_BARREL > 0
#define BSRLI(rD, rA, imm) \
bsrli rD, rA, imm
#elif CONFIG_XILINX_MICROBLAZE0_USE_DIV > 0
#define BSRLI(rD, rA, imm) \
ori rD, r0, (1 << imm); \
idivu rD, rD, rA
#else
#define BSRLI(rD, rA, imm) BSRLI ## imm (rD, rA)
/* Only the used shift constants defined here - add more if needed */
#define BSRLI2(rD, rA) \
    srl rD, rA; /* >> 1 */ \
    srl rD, rD; /* >> 2 */
#define BSRLI10(rD, rA) \
    srl rD, rA; /* >> 1 */ \
    srl rD, rD; /* >> 2 */ \
    srl rD, rD; /* >> 3 */ \
    srl rD, rD; /* >> 4 */ \
    srl rD, rD; /* >> 5 */ \
    srl rD, rD; /* >> 6 */ \
    srl rD, rD; /* >> 7 */ \
    srl rD, rD; /* >> 8 */ \
    srl rD, rD; /* >> 9 */ \
    srl rD, rD /* >> 10 */
#define BSRLI20(rD, rA) \
    BSRLI10(rD, rA); \
    BSRLI10(rD, rD)
#endif
#endif /* CONFIG_MMU */
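
All three BSRLI variants compute rD = rA >> imm for an unsigned rA: the barrel
shifter does it in one instruction, the divider relies on the identity
rA / (1 << imm) == rA >> imm, and the fallback chains imm single-bit srl's. A
one-line C model (illustrative, not part of this patch):

    /* C model of BSRLI(rD, rA, imm); imm must be a compile-time constant */
    static inline unsigned long bsrli(unsigned long ra, unsigned int imm)
    {
        return ra >> imm; /* == ra / (1UL << imm) for unsigned ra */
    }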
.extern other_exception_handler /* Defined in exception.c */

/*
@@ -163,34 +232,119 @@
/* wrappers to restore state before coming to entry.S */ /* wrappers to restore state before coming to entry.S */
#ifdef CONFIG_MMU
.section .rodata
.align 4
_MB_HW_ExceptionVectorTable:
/* 0 - Undefined */
.long TOPHYS(ex_handler_unhandled)
/* 1 - Unaligned data access exception */
.long TOPHYS(handle_unaligned_ex)
/* 2 - Illegal op-code exception */
.long TOPHYS(full_exception_trapw)
/* 3 - Instruction bus error exception */
.long TOPHYS(full_exception_trapw)
/* 4 - Data bus error exception */
.long TOPHYS(full_exception_trapw)
/* 5 - Divide by zero exception */
.long TOPHYS(full_exception_trapw)
/* 6 - Floating point unit exception */
.long TOPHYS(full_exception_trapw)
/* 7 - Privileged instruction exception */
.long TOPHYS(full_exception_trapw)
/* 8 - 15 - Undefined */
.long TOPHYS(ex_handler_unhandled)
.long TOPHYS(ex_handler_unhandled)
.long TOPHYS(ex_handler_unhandled)
.long TOPHYS(ex_handler_unhandled)
.long TOPHYS(ex_handler_unhandled)
.long TOPHYS(ex_handler_unhandled)
.long TOPHYS(ex_handler_unhandled)
.long TOPHYS(ex_handler_unhandled)
/* 16 - Data storage exception */
.long TOPHYS(handle_data_storage_exception)
/* 17 - Instruction storage exception */
.long TOPHYS(handle_instruction_storage_exception)
/* 18 - Data TLB miss exception */
.long TOPHYS(handle_data_tlb_miss_exception)
/* 19 - Instruction TLB miss exception */
.long TOPHYS(handle_instruction_tlb_miss_exception)
/* 20 - 31 - Undefined */
.long TOPHYS(ex_handler_unhandled)
.long TOPHYS(ex_handler_unhandled)
.long TOPHYS(ex_handler_unhandled)
.long TOPHYS(ex_handler_unhandled)
.long TOPHYS(ex_handler_unhandled)
.long TOPHYS(ex_handler_unhandled)
.long TOPHYS(ex_handler_unhandled)
.long TOPHYS(ex_handler_unhandled)
.long TOPHYS(ex_handler_unhandled)
.long TOPHYS(ex_handler_unhandled)
.long TOPHYS(ex_handler_unhandled)
.long TOPHYS(ex_handler_unhandled)
#endif
.global _hw_exception_handler
.section .text
.align 4
.ent _hw_exception_handler
_hw_exception_handler:
#ifndef CONFIG_MMU
    addik r1, r1, -(EX_HANDLER_STACK_SIZ); /* Create stack frame */
#else
swi r1, r0, TOPHYS(r0_ram + 0); /* GET_SP */
    /* Save data to kernel memory. This is a problem
     * when we come from user space */
ori r1, r0, TOPHYS(r0_ram + 28);
#endif
    swi r3, r1, PT_R3
    swi r4, r1, PT_R4
    swi r5, r1, PT_R5
    swi r6, r1, PT_R6
#ifdef CONFIG_MMU
    swi r11, r1, PT_R11
    swi r31, r1, PT_R31
    lwi r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE)) /* get saved current */
#endif
    mfs r3, resr
    nop
mfs r4, rear;
nop
#ifndef CONFIG_MMU
    andi r5, r3, 0x1000; /* Check ESR[DS] */
    beqi r5, not_in_delay_slot; /* Branch if ESR[DS] not set */
    mfs r17, rbtr; /* ESR[DS] set - return address in BTR */
    nop
not_in_delay_slot:
    swi r17, r1, PT_R17
#endif
    andi r5, r3, 0x1F; /* Extract ESR[EXC] */
#ifdef CONFIG_MMU
/* Calculate exception vector offset = r5 << 2 */
addk r6, r5, r5; /* << 1 */
addk r6, r6, r6; /* << 2 */
/* count which exception happened */
lwi r5, r0, 0x200 + TOPHYS(r0_ram)
addi r5, r5, 1
swi r5, r0, 0x200 + TOPHYS(r0_ram)
lwi r5, r6, 0x200 + TOPHYS(r0_ram)
addi r5, r5, 1
swi r5, r6, 0x200 + TOPHYS(r0_ram)
/* end */
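The accounting above keeps hit counters in the r0_ram scratch area. In C it amounts to roughly the following sketch (the base offset 0x200 and the per-cause layout are read off the offsets used above; not code from the patch):

static void count_exception(unsigned long r0_ram_base, unsigned int exc)
{
        unsigned long *counters = (unsigned long *)(r0_ram_base + 0x200);

        counters[0]++;          /* total hardware exceptions taken */
        counters[exc]++;        /* per-cause counter, exc = ESR[EXC] */
}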
/* Load the HW Exception vector */
lwi r6, r6, TOPHYS(_MB_HW_ExceptionVectorTable)
bra r6
full_exception_trapw:
RESTORE_STATE
bri full_exception_trap
#else
/* Exceptions enabled here. This will allow nested exceptions */
mfs r6, rmsr;
nop
...@@ -254,6 +408,7 @@ handle_other_ex: /* Handle Other exceptions here */
lwi r18, r1, PT_R18
bri ex_handler_done; /* Complete exception handling */
#endif
/* 0x01 - Unaligned data access exception
* This occurs when a word access is not aligned on a word boundary,
...@@ -265,11 +420,28 @@ handle_other_ex: /* Handle Other exceptions here */
handle_unaligned_ex:
/* Working registers already saved: R3, R4, R5, R6
* R3 = ESR
* R4 = EAR
*/
#ifdef CONFIG_MMU
andi r6, r3, 0x1000 /* Check ESR[DS] */
beqi r6, _no_delayslot /* Branch if ESR[DS] not set */
mfs r17, rbtr; /* ESR[DS] set - return address in BTR */
nop
_no_delayslot:
#endif
#ifdef CONFIG_MMU
/* Check if unaligned address is last on a 4k page */
andi r5, r4, 0xffc
xori r5, r5, 0xffc
bnei r5, _unaligned_ex2
_unaligned_ex1:
RESTORE_STATE;
/* Another page must be accessed or physical address not in page table */
bri unaligned_data_trap
_unaligned_ex2:
#endif
andi r6, r3, 0x3E0; /* Mask and extract the register operand */
srl r6, r6; /* r6 >> 5 */
srl r6, r6;
...@@ -278,6 +450,45 @@ handle_unaligned_ex:
srl r6, r6;
/* Store the register operand in a temporary location */
sbi r6, r0, TOPHYS(ex_reg_op);
#ifdef CONFIG_MMU
/* Get physical address */
/* If we are faulting a kernel address, we have to use the
* kernel page tables.
*/
ori r5, r0, CONFIG_KERNEL_START
cmpu r5, r4, r5
bgti r5, _unaligned_ex3
ori r5, r0, swapper_pg_dir
bri _unaligned_ex4
/* Get the PGD for the current thread. */
_unaligned_ex3: /* user thread */
addi r5, CURRENT_TASK, TOPHYS(0); /* get current task address */
lwi r5, r5, TASK_THREAD + PGDIR
_unaligned_ex4:
tophys(r5,r5)
BSRLI(r6,r4,20) /* Create L1 (pgdir/pmd) address */
andi r6, r6, 0xffc
/* Assume pgdir aligned on 4K boundary, no need for "andi r5,r5,0xfffff003" */
or r5, r5, r6
lwi r6, r5, 0 /* Get L1 entry */
andi r5, r6, 0xfffff000 /* Extract L2 (pte) base address. */
beqi r5, _unaligned_ex1 /* Bail if no table */
tophys(r5,r5)
BSRLI(r6,r4,10) /* Compute PTE address */
andi r6, r6, 0xffc
andi r5, r5, 0xfffff003
or r5, r5, r6
lwi r5, r5, 0 /* Get Linux PTE */
andi r6, r5, _PAGE_PRESENT
beqi r6, _unaligned_ex1 /* Bail if no page */
andi r5, r5, 0xfffff000 /* Extract RPN */
andi r4, r4, 0x00000fff /* Extract offset */
or r4, r4, r5 /* Create physical address */
#endif /* CONFIG_MMU */
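The software walk above mirrors the two-level layout implied by the shift amounts: EAR bits 31:22 index the PGD, bits 21:12 index the PTE table, and the low 12 bits are the page offset. A C sketch of the same walk (tophys() and the bail-out convention stand in for the assembly; this is an illustration, not code from the patch):

static unsigned long walk_to_phys(unsigned long *pgd_base, unsigned long ear)
{
        unsigned long l1, l2, pte;

        l1 = *(unsigned long *)(tophys((unsigned long)pgd_base)
                                + ((ear >> 20) & 0xffc)); /* PGD slot, 4 bytes each */
        l2 = l1 & 0xfffff000;                             /* L2 (PTE) table base */
        if (!l2)
                return 0;                                 /* no table: bail out */
        pte = *(unsigned long *)(tophys(l2) + ((ear >> 10) & 0xffc));
        if (!(pte & _PAGE_PRESENT))
                return 0;                                 /* no page: bail out */
        return (pte & 0xfffff000) | (ear & 0xfff);        /* physical address */
}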
andi r6, r3, 0x400; /* Extract ESR[S] */
bnei r6, ex_sw;
...@@ -355,6 +566,7 @@ ex_shw:
ex_sw_end: /* Exception handling of store word, ends. */
ex_handler_done:
#ifndef CONFIG_MMU
lwi r5, r1, 0 /* RMSR */
mts rmsr, r5
nop
...@@ -366,13 +578,455 @@ ex_handler_done:
rted r17, 0
addik r1, r1, (EX_HANDLER_STACK_SIZ); /* Restore stack frame */
#else
RESTORE_STATE;
rted r17, 0
nop
#endif
#ifdef CONFIG_MMU
/* Exception vector entry code. This code runs with address translation
* turned off (i.e. using physical addresses). */
/* Exception vectors. */
/* 0x10 - Data Storage Exception
* This happens for just a few reasons. U0 set (but we don't do that),
* or zone protection fault (user violation, write to protected page).
* If this is just an update of modified status, we do that quickly
* and exit. Otherwise, we call heavyweight functions to do the work.
*/
handle_data_storage_exception:
/* Working registers already saved: R3, R4, R5, R6
* R3 = ESR
*/
mfs r11, rpid
nop
bri 4
mfs r3, rear /* Get faulting address */
nop
/* If we are faulting a kernel address, we have to use the
* kernel page tables.
*/
ori r4, r0, CONFIG_KERNEL_START
cmpu r4, r3, r4
bgti r4, ex3
/* First, check if it was a zone fault (which means a user
* tried to access a kernel or read-protected page - always
* a SEGV). All other faults here must be stores, so no
* need to check ESR_S as well. */
mfs r4, resr
nop
andi r4, r4, 0x800 /* ESR_Z - zone protection */
bnei r4, ex2
ori r4, r0, swapper_pg_dir
mts rpid, r0 /* TLB will have 0 TID */
nop
bri ex4
/* Get the PGD for the current thread. */
ex3:
/* First, check if it was a zone fault (which means a user
* tried to access a kernel or read-protected page - always
* a SEGV). All other faults here must be stores, so no
* need to check ESR_S as well. */
mfs r4, resr
nop
andi r4, r4, 0x800 /* ESR_Z */
bnei r4, ex2
/* get current task address */
addi r4, CURRENT_TASK, TOPHYS(0);
lwi r4, r4, TASK_THREAD+PGDIR
ex4:
tophys(r4,r4)
BSRLI(r5,r3,20) /* Create L1 (pgdir/pmd) address */
andi r5, r5, 0xffc
/* Assume pgdir aligned on 4K boundary, no need for "andi r4,r4,0xfffff003" */
or r4, r4, r5
lwi r4, r4, 0 /* Get L1 entry */
andi r5, r4, 0xfffff000 /* Extract L2 (pte) base address */
beqi r5, ex2 /* Bail if no table */
tophys(r5,r5)
BSRLI(r6,r3,10) /* Compute PTE address */
andi r6, r6, 0xffc
andi r5, r5, 0xfffff003
or r5, r5, r6
lwi r4, r5, 0 /* Get Linux PTE */
andi r6, r4, _PAGE_RW /* Is it writeable? */
beqi r6, ex2 /* Bail if not */
/* Update 'changed' */
ori r4, r4, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE
swi r4, r5, 0 /* Update Linux page table */
/* Most of the Linux PTE is ready to load into the TLB LO.
* We set ZSEL, where only the LS-bit determines user access.
* We set execute, because we don't have the granularity to
* properly set this at the page level (Linux problem).
* If shared is set, we cause a zero PID->TID load.
* Many of these bits are software only. Bits we don't set
* here we (properly should) assume have the appropriate value.
*/
andni r4, r4, 0x0ce2 /* Make sure 20, 21 are zero */
ori r4, r4, _PAGE_HWEXEC /* make it executable */
/* Find the TLB index that caused the fault. It has to be here. */
mts rtlbsx, r3
nop
mfs r5, rtlbx /* DEBUG: TBD */
nop
mts rtlblo, r4 /* Load TLB LO */
nop
/* Will sync shadow TLBs */
/* Done...restore registers and get out of here. */
mts rpid, r11
nop
bri 4
RESTORE_STATE;
rted r17, 0
nop
ex2:
/* The bailout. Restore registers to pre-exception conditions
* and call the heavyweights to help us out. */
mts rpid, r11
nop
bri 4
RESTORE_STATE;
bri page_fault_data_trap
/* 0x11 - Instruction Storage Exception
* This is caused by a fetch from non-execute or guarded pages. */
handle_instruction_storage_exception:
/* Working registers already saved: R3, R4, R5, R6
* R3 = ESR
*/
mfs r3, rear /* Get faulting address */
nop
RESTORE_STATE;
bri page_fault_instr_trap
/* 0x12 - Data TLB Miss Exception
* As the name implies, translation is not in the MMU, so search the
* page tables and fix it. The only purpose of this function is to
* load TLB entries from the page table if they exist.
*/
handle_data_tlb_miss_exception:
/* Working registers already saved: R3, R4, R5, R6
* R3 = ESR
*/
mfs r11, rpid
nop
bri 4
mfs r3, rear /* Get faulting address */
nop
/* If we are faulting a kernel address, we have to use the
* kernel page tables. */
ori r4, r0, CONFIG_KERNEL_START
cmpu r4, r3, r4
bgti r4, ex5
ori r4, r0, swapper_pg_dir
mts rpid, r0 /* TLB will have 0 TID */
nop
bri ex6
/* Get the PGD for the current thread. */
ex5:
/* get current task address */
addi r4, CURRENT_TASK, TOPHYS(0);
lwi r4, r4, TASK_THREAD+PGDIR
ex6:
tophys(r4,r4)
BSRLI(r5,r3,20) /* Create L1 (pgdir/pmd) address */
andi r5, r5, 0xffc
/* Assume pgdir aligned on 4K boundary, no need for "andi r4,r4,0xfffff003" */
or r4, r4, r5
lwi r4, r4, 0 /* Get L1 entry */
andi r5, r4, 0xfffff000 /* Extract L2 (pte) base address */
beqi r5, ex7 /* Bail if no table */
tophys(r5,r5)
BSRLI(r6,r3,10) /* Compute PTE address */
andi r6, r6, 0xffc
andi r5, r5, 0xfffff003
or r5, r5, r6
lwi r4, r5, 0 /* Get Linux PTE */
andi r6, r4, _PAGE_PRESENT
beqi r6, ex7
ori r4, r4, _PAGE_ACCESSED
swi r4, r5, 0
/* Most of the Linux PTE is ready to load into the TLB LO.
* We set ZSEL, where only the LS-bit determines user access.
* We set execute, because we don't have the granularity to
* properly set this at the page level (Linux problem).
* If shared is set, we cause a zero PID->TID load.
* Many of these bits are software only. Bits we don't set
* here we (properly should) assume have the appropriate value.
*/
andni r4, r4, 0x0ce2 /* Make sure 20, 21 are zero */
bri finish_tlb_load
ex7:
/* The bailout. Restore registers to pre-exception conditions
* and call the heavyweights to help us out.
*/
mts rpid, r11
nop
bri 4
RESTORE_STATE;
bri page_fault_data_trap
/* 0x13 - Instruction TLB Miss Exception
* Nearly the same as above, except we get our information from
* different registers and bailout to a different point.
*/
handle_instruction_tlb_miss_exception:
/* Working registers already saved: R3, R4, R5, R6
* R3 = ESR
*/
mfs r11, rpid
nop
bri 4
mfs r3, rear /* Get faulting address */
nop
/* If we are faulting a kernel address, we have to use the
* kernel page tables.
*/
ori r4, r0, CONFIG_KERNEL_START
cmpu r4, r3, r4
bgti r4, ex8
ori r4, r0, swapper_pg_dir
mts rpid, r0 /* TLB will have 0 TID */
nop
bri ex9
/* Get the PGD for the current thread. */
ex8:
/* get current task address */
addi r4, CURRENT_TASK, TOPHYS(0);
lwi r4, r4, TASK_THREAD+PGDIR
ex9:
tophys(r4,r4)
BSRLI(r5,r3,20) /* Create L1 (pgdir/pmd) address */
andi r5, r5, 0xffc
/* Assume pgdir aligned on 4K boundary, no need for "andi r4,r4,0xfffff003" */
or r4, r4, r5
lwi r4, r4, 0 /* Get L1 entry */
andi r5, r4, 0xfffff000 /* Extract L2 (pte) base address */
beqi r5, ex10 /* Bail if no table */
tophys(r5,r5)
BSRLI(r6,r3,10) /* Compute PTE address */
andi r6, r6, 0xffc
andi r5, r5, 0xfffff003
or r5, r5, r6
lwi r4, r5, 0 /* Get Linux PTE */
andi r6, r4, _PAGE_PRESENT
beqi r6, ex7
ori r4, r4, _PAGE_ACCESSED
swi r4, r5, 0
/* Most of the Linux PTE is ready to load into the TLB LO.
* We set ZSEL, where only the LS-bit determines user access.
* We set execute, because we don't have the granularity to
* properly set this at the page level (Linux problem).
* If shared is set, we cause a zero PID->TID load.
* Many of these bits are software only. Bits we don't set
* here we (properly should) assume have the appropriate value.
*/
andni r4, r4, 0x0ce2 /* Make sure 20, 21 are zero */
bri finish_tlb_load
ex10:
/* The bailout. Restore registers to pre-exception conditions
* and call the heavyweights to help us out.
*/
mts rpid, r11
nop
bri 4
RESTORE_STATE;
bri page_fault_instr_trap
/* Both the instruction and data TLB miss get to this point to load the TLB.
* r3 - EA of fault
* r4 - TLB LO (info from Linux PTE)
* r5, r6 - available to use
* PID - loaded with proper value when we get here
* Upon exit, we reload everything and RFI.
* A common place to load the TLB.
*/
tlb_index:
.long 1 /* MS: storing last used tlb index */
finish_tlb_load:
/* MS: load the last used TLB index. */
lwi r5, r0, TOPHYS(tlb_index)
addik r5, r5, 1 /* MS: inc tlb_index -> use next one */
/* MS: FIXME this is a potential fault, because this is a mask, not a count */
andi r5, r5, (MICROBLAZE_TLB_SIZE-1)
ori r6, r0, 1
cmp r31, r5, r6
blti r31, sem
addik r5, r6, 1
sem:
/* MS: save back current TLB index */
swi r5, r0, TOPHYS(tlb_index)
ori r4, r4, _PAGE_HWEXEC /* make it executable */
mts rtlbx, r5 /* MS: save current TLB */
nop
mts rtlblo, r4 /* MS: save to TLB LO */
nop
/* Create EPN. This is the faulting address plus a static
* set of bits. These are size, valid, E, U0, and ensure
* bits 20 and 21 are zero.
*/
andi r3, r3, 0xfffff000
ori r3, r3, 0x0c0
mts rtlbhi, r3 /* Load TLB HI */
nop
/* Done...restore registers and get out of here. */
ex12:
mts rpid, r11
nop
bri 4
RESTORE_STATE;
rted r17, 0
nop
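The index selection in finish_tlb_load is a round-robin allocator over the TLB: increment, wrap with a power-of-two mask, and skip the pinned low slots. As a C sketch (helper name hypothetical; the pinned-slot count is taken from the comparison against 1 above):

static unsigned int tlb_index;  /* mirrors the tlb_index word above */

static unsigned int next_tlb_slot(void)
{
        tlb_index = (tlb_index + 1) & (MICROBLAZE_TLB_SIZE - 1);
        if (tlb_index <= 1)     /* slots 0 and 1 stay pinned */
                tlb_index = 2;
        return tlb_index;
}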
/* extern void giveup_fpu(struct task_struct *prev)
*
* The MicroBlaze processor may have an FPU, so this should not just
* return: TBD.
*/
.globl giveup_fpu;
.align 4;
giveup_fpu:
bralid r15,0 /* TBD */
nop
/* At present, this routine just hangs. - extern void abort(void) */
.globl abort;
.align 4;
abort:
br r0
.globl set_context;
.align 4;
set_context:
mts rpid, r5 /* Shadow TLBs are automatically */
nop
bri 4 /* flushed by changing PID */
rtsd r15,8
nop
#endif
.end _hw_exception_handler
#ifdef CONFIG_MMU
/* Unaligned data access exception last on a 4k page for MMU.
* When this is called, we are in virtual mode with exceptions enabled
* and registers 1-13,15,17,18 saved.
*
* R3 = ESR
* R4 = EAR
* R7 = pointer to saved registers (struct pt_regs *regs)
*
* This handler performs the access and returns via ret_from_exc.
*/
.global _unaligned_data_exception
.ent _unaligned_data_exception
_unaligned_data_exception:
andi r8, r3, 0x3E0; /* Mask and extract the register operand */
BSRLI(r8,r8,2); /* r8 >> 2 = register operand * 8 */
andi r6, r3, 0x400; /* Extract ESR[S] */
bneid r6, ex_sw_vm;
andi r6, r3, 0x800; /* Extract ESR[W] - delay slot */
ex_lw_vm:
beqid r6, ex_lhw_vm;
lbui r5, r4, 0; /* Exception address in r4 - delay slot */
/* Load a word, byte-by-byte from destination address and save it in tmp space*/
la r6, r0, ex_tmp_data_loc_0;
sbi r5, r6, 0;
lbui r5, r4, 1;
sbi r5, r6, 1;
lbui r5, r4, 2;
sbi r5, r6, 2;
lbui r5, r4, 3;
sbi r5, r6, 3;
brid ex_lw_tail_vm;
/* Get the destination register value into r3 - delay slot */
lwi r3, r6, 0;
ex_lhw_vm:
/* Load a half-word, byte-by-byte from destination address and
* save it in tmp space */
la r6, r0, ex_tmp_data_loc_0;
sbi r5, r6, 0;
lbui r5, r4, 1;
sbi r5, r6, 1;
lhui r3, r6, 0; /* Get the destination register value into r3 */
ex_lw_tail_vm:
/* Form load_word jump table offset (lw_table_vm + (8 * regnum)) */
addik r5, r8, lw_table_vm;
bra r5;
ex_lw_end_vm: /* Exception handling of load word, ends */
brai ret_from_exc;
ex_sw_vm:
/* Form store_word jump table offset (sw_table_vm + (8 * regnum)) */
addik r5, r8, sw_table_vm;
bra r5;
ex_sw_tail_vm:
la r5, r0, ex_tmp_data_loc_0;
beqid r6, ex_shw_vm;
swi r3, r5, 0; /* Get the word - delay slot */
/* Store the word, byte-by-byte into destination address */
lbui r3, r5, 0;
sbi r3, r4, 0;
lbui r3, r5, 1;
sbi r3, r4, 1;
lbui r3, r5, 2;
sbi r3, r4, 2;
lbui r3, r5, 3;
brid ret_from_exc;
sbi r3, r4, 3; /* Delay slot */
ex_shw_vm:
/* Store the lower half-word, byte-by-byte into destination address */
lbui r3, r5, 2;
sbi r3, r4, 0;
lbui r3, r5, 3;
brid ret_from_exc;
sbi r3, r4, 1; /* Delay slot */
ex_sw_end_vm: /* Exception handling of store word, ends. */
.end _unaligned_data_exception
#endif /* CONFIG_MMU */
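The virtual-mode paths above assemble or scatter the word one byte at a time through an aligned scratch location (ex_tmp_data_loc_0), which is the whole trick for surviving the unaligned access. The load side in C, as a sketch only:

static unsigned int load_word_unaligned(const unsigned char *ea)
{
        union { unsigned int w; unsigned char b[4]; } tmp; /* aligned scratch */

        tmp.b[0] = ea[0];
        tmp.b[1] = ea[1];
        tmp.b[2] = ea[2];
        tmp.b[3] = ea[3];
        return tmp.w;           /* one aligned load, no fault */
}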
ex_handler_unhandled:
/* FIXME add a handler for unhandled exceptions - dump registers */
bri 0
/*
* hw_exception_handler Jump Table
* - Contains code snippets for each register that caused the unaligned exception
* - Hence exception handler is NOT self-modifying
* - Separate table for load exceptions and store exceptions.
* - Each table is of size: (8 * 32) = 256 bytes
*/
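Since every slot is two 4-byte instructions, the handler dispatches with plain address arithmetic; a C sketch of the target computation (illustration only, the real dispatch is the bra seen earlier):

extern char lw_table[];   /* the table emitted below */

static unsigned long lw_dispatch_target(unsigned int regnum)
{
        return (unsigned long)lw_table + 8 * regnum; /* 8 bytes per slot */
}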
.section .text
.align 4
lw_table:
...@@ -407,7 +1061,11 @@ lw_r27: R3_TO_LWREG (27);
lw_r28: R3_TO_LWREG (28);
lw_r29: R3_TO_LWREG (29);
lw_r30: R3_TO_LWREG (30);
#ifdef CONFIG_MMU
lw_r31: R3_TO_LWREG_V (31);
#else
lw_r31: R3_TO_LWREG (31);
#endif
sw_table:
sw_r0: SWREG_TO_R3 (0);
...@@ -441,7 +1099,81 @@ sw_r27: SWREG_TO_R3 (27);
sw_r28: SWREG_TO_R3 (28);
sw_r29: SWREG_TO_R3 (29);
sw_r30: SWREG_TO_R3 (30);
#ifdef CONFIG_MMU
sw_r31: SWREG_TO_R3_V (31);
#else
sw_r31: SWREG_TO_R3 (31);
#endif
#ifdef CONFIG_MMU
lw_table_vm:
lw_r0_vm: R3_TO_LWREG_VM (0);
lw_r1_vm: R3_TO_LWREG_VM_V (1);
lw_r2_vm: R3_TO_LWREG_VM_V (2);
lw_r3_vm: R3_TO_LWREG_VM_V (3);
lw_r4_vm: R3_TO_LWREG_VM_V (4);
lw_r5_vm: R3_TO_LWREG_VM_V (5);
lw_r6_vm: R3_TO_LWREG_VM_V (6);
lw_r7_vm: R3_TO_LWREG_VM_V (7);
lw_r8_vm: R3_TO_LWREG_VM_V (8);
lw_r9_vm: R3_TO_LWREG_VM_V (9);
lw_r10_vm: R3_TO_LWREG_VM_V (10);
lw_r11_vm: R3_TO_LWREG_VM_V (11);
lw_r12_vm: R3_TO_LWREG_VM_V (12);
lw_r13_vm: R3_TO_LWREG_VM_V (13);
lw_r14_vm: R3_TO_LWREG_VM (14);
lw_r15_vm: R3_TO_LWREG_VM_V (15);
lw_r16_vm: R3_TO_LWREG_VM (16);
lw_r17_vm: R3_TO_LWREG_VM_V (17);
lw_r18_vm: R3_TO_LWREG_VM_V (18);
lw_r19_vm: R3_TO_LWREG_VM (19);
lw_r20_vm: R3_TO_LWREG_VM (20);
lw_r21_vm: R3_TO_LWREG_VM (21);
lw_r22_vm: R3_TO_LWREG_VM (22);
lw_r23_vm: R3_TO_LWREG_VM (23);
lw_r24_vm: R3_TO_LWREG_VM (24);
lw_r25_vm: R3_TO_LWREG_VM (25);
lw_r26_vm: R3_TO_LWREG_VM (26);
lw_r27_vm: R3_TO_LWREG_VM (27);
lw_r28_vm: R3_TO_LWREG_VM (28);
lw_r29_vm: R3_TO_LWREG_VM (29);
lw_r30_vm: R3_TO_LWREG_VM (30);
lw_r31_vm: R3_TO_LWREG_VM_V (31);
sw_table_vm:
sw_r0_vm: SWREG_TO_R3_VM (0);
sw_r1_vm: SWREG_TO_R3_VM_V (1);
sw_r2_vm: SWREG_TO_R3_VM_V (2);
sw_r3_vm: SWREG_TO_R3_VM_V (3);
sw_r4_vm: SWREG_TO_R3_VM_V (4);
sw_r5_vm: SWREG_TO_R3_VM_V (5);
sw_r6_vm: SWREG_TO_R3_VM_V (6);
sw_r7_vm: SWREG_TO_R3_VM_V (7);
sw_r8_vm: SWREG_TO_R3_VM_V (8);
sw_r9_vm: SWREG_TO_R3_VM_V (9);
sw_r10_vm: SWREG_TO_R3_VM_V (10);
sw_r11_vm: SWREG_TO_R3_VM_V (11);
sw_r12_vm: SWREG_TO_R3_VM_V (12);
sw_r13_vm: SWREG_TO_R3_VM_V (13);
sw_r14_vm: SWREG_TO_R3_VM (14);
sw_r15_vm: SWREG_TO_R3_VM_V (15);
sw_r16_vm: SWREG_TO_R3_VM (16);
sw_r17_vm: SWREG_TO_R3_VM_V (17);
sw_r18_vm: SWREG_TO_R3_VM_V (18);
sw_r19_vm: SWREG_TO_R3_VM (19);
sw_r20_vm: SWREG_TO_R3_VM (20);
sw_r21_vm: SWREG_TO_R3_VM (21);
sw_r22_vm: SWREG_TO_R3_VM (22);
sw_r23_vm: SWREG_TO_R3_VM (23);
sw_r24_vm: SWREG_TO_R3_VM (24);
sw_r25_vm: SWREG_TO_R3_VM (25);
sw_r26_vm: SWREG_TO_R3_VM (26);
sw_r27_vm: SWREG_TO_R3_VM (27);
sw_r28_vm: SWREG_TO_R3_VM (28);
sw_r29_vm: SWREG_TO_R3_VM (29);
sw_r30_vm: SWREG_TO_R3_VM (30);
sw_r31_vm: SWREG_TO_R3_VM_V (31);
#endif /* CONFIG_MMU */
/* Temporary data structures used in the handler */
.section .data
...
...@@ -45,3 +45,5 @@ extern void __udivsi3(void);
EXPORT_SYMBOL(__udivsi3);
extern void __umodsi3(void);
EXPORT_SYMBOL(__umodsi3);
extern char *_ebss;
EXPORT_SYMBOL_GPL(_ebss);
/*
* Miscellaneous low-level MMU functions.
*
* Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
* Copyright (C) 2008-2009 PetaLogix
* Copyright (C) 2007 Xilinx, Inc. All rights reserved.
*
* Derived from arch/ppc/kernel/misc.S
*
* This file is subject to the terms and conditions of the GNU General
* Public License. See the file COPYING in the main directory of this
* archive for more details.
*/
#include <linux/linkage.h>
#include <linux/sys.h>
#include <asm/unistd.h>
#include <linux/errno.h>
#include <asm/mmu.h>
#include <asm/page.h>
.text
/*
* Flush MMU TLB
*
* We avoid flushing the pinned 0, 1 and possibly 2 entries.
*/
.globl _tlbia;
.align 4;
_tlbia:
addik r12, r0, 63 /* flush all entries (63 - 3) */
/* isync */
_tlbia_1:
mts rtlbx, r12
nop
mts rtlbhi, r0 /* flush: ensure V is clear */
nop
addik r11, r12, -2
bneid r11, _tlbia_1 /* loop for all entries */
addik r12, r12, -1
/* sync */
rtsd r15, 8
nop
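Unrolled into C, the loop above walks slot 63 down to slot 2, so the two low pinned entries are never touched (mts_rtlbx()/mts_rtlbhi() are hypothetical wrappers for the privileged mts instructions):

static void tlbia_c(void)
{
        for (int slot = 63; slot >= 2; slot--) {
                mts_rtlbx(slot);        /* select the TLB slot */
                mts_rtlbhi(0);          /* clear V: entry invalidated */
        }
}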
/*
* Flush MMU TLB for a particular address (in r5)
*/
.globl _tlbie;
.align 4;
_tlbie:
mts rtlbsx, r5 /* look up the address in TLB */
nop
mfs r12, rtlbx /* Retrieve index */
nop
blti r12, _tlbie_1 /* Check if found */
mts rtlbhi, r0 /* flush: ensure V is clear */
nop
_tlbie_1:
rtsd r15, 8
nop
/*
* Allocate TLB entry for early console
*/
.globl early_console_reg_tlb_alloc;
.align 4;
early_console_reg_tlb_alloc:
/*
* Load a TLB entry for the UART, so that microblaze_progress() can use
* the UARTs nice and early. We use a 4k real==virtual mapping.
*/
ori r4, r0, 63
mts rtlbx, r4 /* TLB slot 63 */
or r4,r5,r0
andi r4,r4,0xfffff000
ori r4,r4,(TLB_WR|TLB_I|TLB_M|TLB_G)
andi r5,r5,0xfffff000
ori r5,r5,(TLB_VALID | TLB_PAGESZ(PAGESZ_4K))
mts rtlblo,r4 /* Load the data portion of the entry */
nop
mts rtlbhi,r5 /* Load the tag portion of the entry */
nop
rtsd r15, 8
nop
/*
* Copy a whole page (4096 bytes).
*/
#define COPY_16_BYTES \
lwi r7, r6, 0; \
lwi r8, r6, 4; \
lwi r9, r6, 8; \
lwi r10, r6, 12; \
swi r7, r5, 0; \
swi r8, r5, 4; \
swi r9, r5, 8; \
swi r10, r5, 12
/* FIXME DCACHE_LINE_BYTES (CONFIG_XILINX_MICROBLAZE0_DCACHE_LINE_LEN * 4)*/
#define DCACHE_LINE_BYTES (4 * 4)
.globl copy_page;
.align 4;
copy_page:
ori r11, r0, (PAGE_SIZE/DCACHE_LINE_BYTES) - 1
_copy_page_loop:
COPY_16_BYTES
#if DCACHE_LINE_BYTES >= 32
COPY_16_BYTES
#endif
addik r6, r6, DCACHE_LINE_BYTES
addik r5, r5, DCACHE_LINE_BYTES
bneid r11, _copy_page_loop
addik r11, r11, -1
rtsd r15, 8
nop
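copy_page moves one dcache line per iteration, PAGE_SIZE/DCACHE_LINE_BYTES times. The same loop in C (a sketch; unsigned long is 4 bytes on MicroBlaze, matching the /4 below):

void copy_page_c(void *to, const void *from)
{
        unsigned long *d = to;
        const unsigned long *s = from;

        for (int i = 0; i < PAGE_SIZE / DCACHE_LINE_BYTES; i++)
                for (int j = 0; j < DCACHE_LINE_BYTES / 4; j++)
                        *d++ = *s++;
}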
...@@ -126,9 +126,54 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
else
childregs->r1 = ((unsigned long) ti) + THREAD_SIZE;
#ifndef CONFIG_MMU
memset(&ti->cpu_context, 0, sizeof(struct cpu_context));
ti->cpu_context.r1 = (unsigned long)childregs;
ti->cpu_context.msr = (unsigned long)childregs->msr;
#else
/* if creating a kernel thread then update the current reg (we don't
* want to use the parent's value when restoring by POP_STATE) */
if (kernel_mode(regs))
/* save new current on stack to use POP_STATE */
childregs->CURRENT_TASK = (unsigned long)p;
/* if returning to user then use the parent's value of this register */
/* if we're creating a new kernel thread then just zero all
* the registers. That's OK for a brand new thread. */
/* Please note that some of them will be restored in POP_STATE */
if (kernel_mode(regs))
memset(&ti->cpu_context, 0, sizeof(struct cpu_context));
/* if this thread is created for fork/vfork/clone, then we want to
* restore all the parent's context */
/* in addition to the registers which will be restored by POP_STATE */
else {
ti->cpu_context = *(struct cpu_context *)regs;
childregs->msr |= MSR_UMS;
}
/* FIXME STATE_SAVE_PT_OFFSET; */
ti->cpu_context.r1 = (unsigned long)childregs - STATE_SAVE_ARG_SPACE;
/* childregs is a copy of the parent's regs, which were saved
* immediately after entering kernel state, before enabling VM.
* This MSR will be restored in switch_to and RETURN() and we want
* the right machine state there; specifically, it must have INTs
* disabled before and enabled after performing rtbd.
* Compose the right MSR for RETURN(); it will work for switch_to
* as well, except for VM and UMS.
* Don't touch UMS, CARRY and cache bits.
* Right now MSR is a copy of the parent's one. */
childregs->msr |= MSR_BIP;
childregs->msr &= ~MSR_EIP;
childregs->msr |= MSR_IE;
childregs->msr &= ~MSR_VM;
childregs->msr |= MSR_VMS;
childregs->msr |= MSR_EE; /* exceptions will be enabled*/
ti->cpu_context.msr = (childregs->msr|MSR_VM);
ti->cpu_context.msr &= ~MSR_UMS; /* switch_to to kernel mode */
#endif
ti->cpu_context.r15 = (unsigned long)ret_from_fork - 8;
if (clone_flags & CLONE_SETTLS)
...@@ -137,6 +182,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
return 0;
}
#ifndef CONFIG_MMU
/*
* Return saved PC of a blocked thread.
*/
...@@ -151,6 +197,7 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
else
return ctx->r14;
}
#endif
static void kernel_thread_helper(int (*fn)(void *), void *arg)
{
...@@ -173,6 +220,7 @@ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0,
&regs, 0, NULL, NULL);
}
EXPORT_SYMBOL_GPL(kernel_thread);
unsigned long get_wchan(struct task_struct *p)
{
...@@ -188,3 +236,14 @@ void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long usp)
regs->r1 = usp;
regs->pt_mode = 0;
}
#ifdef CONFIG_MMU
#include <linux/elfcore.h>
/*
* Dump the floating-point registers for an ELF core dump
*/
int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpregs)
{
return 0; /* MicroBlaze has no separate FPU registers */
}
#endif /* CONFIG_MMU */
...@@ -509,12 +509,13 @@ static void __init early_init_dt_check_for_initrd(unsigned long node)
prop = of_get_flat_dt_prop(node, "linux,initrd-start", &l);
if (prop) {
initrd_start = (unsigned long)
__va((u32)of_read_ulong(prop, l/4));
prop = of_get_flat_dt_prop(node, "linux,initrd-end", &l);
if (prop) {
initrd_end = (unsigned long)
__va((u32)of_read_ulong(prop, l/4));
initrd_below_start_ok = 1;
} else {
initrd_start = 0;
...@@ -563,7 +564,9 @@ static int __init early_init_dt_scan_chosen(unsigned long node,
strlcpy(cmd_line, p, min((int)l, COMMAND_LINE_SIZE));
#ifdef CONFIG_CMDLINE
#ifndef CONFIG_CMDLINE_FORCE
if (p == NULL || l == 0 || (l == 1 && (*p) == 0))
#endif
strlcpy(cmd_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
#endif /* CONFIG_CMDLINE */
...
...@@ -42,10 +42,6 @@ char cmd_line[COMMAND_LINE_SIZE];
void __init setup_arch(char **cmdline_p)
{
#ifdef CONFIG_CMDLINE_FORCE
strlcpy(cmd_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
strlcpy(boot_command_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
#endif
*cmdline_p = cmd_line;
console_verbose();
...@@ -102,14 +98,34 @@ void __init machine_early_init(const char *cmdline, unsigned int ram,
{
unsigned long *src, *dst = (unsigned long *)0x0;
/* If CONFIG_MTD_UCLINUX is defined, assume ROMFS is at the
* end of the kernel. There are two positions we want to check:
* first __init_end and then __bss_start.
*/
#ifdef CONFIG_MTD_UCLINUX
int romfs_size;
unsigned int romfs_base;
char *old_klimit = klimit;
romfs_base = (ram ? ram : (unsigned int)&__init_end);
romfs_size = PAGE_ALIGN(get_romfs_len((unsigned *)romfs_base));
if (!romfs_size) {
romfs_base = (unsigned int)&__bss_start;
romfs_size = PAGE_ALIGN(get_romfs_len((unsigned *)romfs_base));
}
/* Move ROMFS out of BSS before clearing it */
if (romfs_size > 0) {
memmove(&_ebss, (int *)romfs_base, romfs_size);
klimit += romfs_size;
}
#endif
/* clearing bss section */
memset(__bss_start, 0, __bss_stop-__bss_start);
memset(_ssbss, 0, _esbss-_ssbss);
/* Copy command line passed from bootloader */
#ifndef CONFIG_CMDLINE_BOOL
if (cmdline && cmdline[0] != '\0')
strlcpy(cmd_line, cmdline, COMMAND_LINE_SIZE);
...@@ -126,27 +142,15 @@ void __init machine_early_init(const char *cmdline, unsigned int ram,
printk(KERN_NOTICE "Found FDT at 0x%08x\n", fdt);
#ifdef CONFIG_MTD_UCLINUX
{
int size;
unsigned int romfs_base;
romfs_base = (ram ? ram : (unsigned int)&__init_end);
/* if CONFIG_MTD_UCLINUX_EBSS is defined, assume ROMFS is at the
* end of kernel, which is ROMFS_LOCATION defined above. */
size = PAGE_ALIGN(get_romfs_len((unsigned *)romfs_base));
early_printk("Found romfs @ 0x%08x (0x%08x)\n",
romfs_base, size);
early_printk("#### klimit %p ####\n", klimit);
BUG_ON(size < 0); /* What else can we do? */
/* Use memmove to handle likely case of memory overlap */
early_printk("Moving 0x%08x bytes from 0x%08x to 0x%08x\n",
size, romfs_base, (unsigned)&_ebss);
memmove(&_ebss, (int *)romfs_base, size);
/* update klimit */
klimit += PAGE_ALIGN(size);
early_printk("New klimit: 0x%08x\n", (unsigned)klimit);
}
early_printk("Found romfs @ 0x%08x (0x%08x)\n",
romfs_base, romfs_size);
early_printk("#### klimit %p ####\n", old_klimit);
BUG_ON(romfs_size < 0); /* What else can we do? */
early_printk("Moved 0x%08x bytes from 0x%08x to 0x%08x\n",
romfs_size, romfs_base, (unsigned)&_ebss);
early_printk("New klimit: 0x%08x\n", (unsigned)klimit);
#endif
for (src = __ivt_start; src < __ivt_end; src++, dst++)
...
...@@ -152,8 +152,8 @@ struct rt_sigframe {
unsigned long tramp[2]; /* signal trampoline */
};
static int restore_sigcontext(struct pt_regs *regs,
struct sigcontext __user *sc, int *rval_p)
{
unsigned int err = 0;
...@@ -211,11 +211,10 @@ asmlinkage int sys_sigreturn(struct pt_regs *regs)
asmlinkage int sys_rt_sigreturn(struct pt_regs *regs)
{
struct rt_sigframe __user *frame =
(struct rt_sigframe __user *)(regs->r1 + STATE_SAVE_ARG_SPACE);
sigset_t set;
stack_t st;
int rval;
if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
...@@ -233,11 +232,10 @@ asmlinkage int sys_rt_sigreturn(struct pt_regs *regs)
if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &rval))
goto badframe;
if (__copy_from_user((void *)&st, &frame->uc.uc_stack, sizeof(st)))
goto badframe;
/* It is more difficult to avoid calling this function than to
call it and ignore errors. */
if (do_sigaltstack(&frame->uc.uc_stack, NULL, regs->r1))
goto badframe;
return rval;
...@@ -251,7 +249,7 @@ asmlinkage int sys_rt_sigreturn(struct pt_regs *regs)
*/
static int
setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
unsigned long mask)
{
int err = 0;
...@@ -278,7 +276,7 @@ setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs,
/*
* Determine which stack to use..
*/
static inline void __user *
get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size)
{
/* Default to using normal stack */
...@@ -287,87 +285,13 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size)
if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && !on_sig_stack(sp))
sp = current->sas_ss_sp + current->sas_ss_size;
return (void __user *)((sp - frame_size) & -8UL);
}
static void setup_frame(int sig, struct k_sigaction *ka,
sigset_t *set, struct pt_regs *regs)
{
struct sigframe *frame;
int err = 0;
int signal;
frame = get_sigframe(ka, regs, sizeof(*frame));
if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
goto give_sigsegv;
signal = current_thread_info()->exec_domain
&& current_thread_info()->exec_domain->signal_invmap
&& sig < 32
? current_thread_info()->exec_domain->signal_invmap[sig]
: sig;
err |= setup_sigcontext(&frame->sc, regs, set->sig[0]);
if (_NSIG_WORDS > 1) {
err |= __copy_to_user(frame->extramask, &set->sig[1],
sizeof(frame->extramask));
}
/* Set up to return from userspace. If provided, use a stub
already in userspace. */
/* minus 8 is offset to cater for "rtsd r15,8" offset */
if (ka->sa.sa_flags & SA_RESTORER) {
regs->r15 = ((unsigned long)ka->sa.sa_restorer)-8;
} else {
/* Note, these encodings are _big endian_! */
/* addi r12, r0, __NR_sigreturn */
err |= __put_user(0x31800000 | __NR_sigreturn ,
frame->tramp + 0);
/* brki r14, 0x8 */
err |= __put_user(0xb9cc0008, frame->tramp + 1);
/* Return from sighandler will jump to the tramp.
Negative 8 offset because return is rtsd r15, 8 */
regs->r15 = ((unsigned long)frame->tramp)-8;
__invalidate_cache_sigtramp((unsigned long)frame->tramp);
}
if (err)
goto give_sigsegv;
/* Set up registers for signal handler */
regs->r1 = (unsigned long) frame - STATE_SAVE_ARG_SPACE;
/* Signal handler args: */
regs->r5 = signal; /* Arg 0: signum */
regs->r6 = (unsigned long) &frame->sc; /* arg 1: sigcontext */
/* Offset of 4 to handle microblaze rtid r14, 0 */
regs->pc = (unsigned long)ka->sa.sa_handler;
set_fs(USER_DS);
#ifdef DEBUG_SIG
printk(KERN_INFO "SIG deliver (%s:%d): sp=%p pc=%08lx\n",
current->comm, current->pid, frame, regs->pc);
#endif
return;
give_sigsegv:
if (sig == SIGSEGV)
ka->sa.sa_handler = SIG_DFL;
force_sig(SIGSEGV, current);
} }
static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
sigset_t *set, struct pt_regs *regs)
{
struct rt_sigframe __user *frame;
int err = 0;
int signal;
...@@ -382,6 +306,7 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
? current_thread_info()->exec_domain->signal_invmap[sig]
: sig;
if (info)
err |= copy_siginfo_to_user(&frame->info, info);
/* Create the ucontext. */
...@@ -463,7 +388,15 @@ handle_restart(struct pt_regs *regs, struct k_sigaction *ka, int has_handler)
case -ERESTARTNOINTR:
do_restart:
/* offset of 4 bytes to re-execute trap (brki) instruction */
#ifndef CONFIG_MMU
regs->pc -= 4;
#else
/* offset of 8 bytes required = 4 for rtbd
* offset, plus 4 for size of "brki r14,8"
* instruction. */
regs->pc -= 8;
#endif
break;
}
}
...@@ -480,7 +413,7 @@ handle_signal(unsigned long sig, struct k_sigaction *ka,
if (ka->sa.sa_flags & SA_SIGINFO)
setup_rt_frame(sig, ka, info, oldset, regs);
else
setup_rt_frame(sig, ka, NULL, oldset, regs);
if (ka->sa.sa_flags & SA_ONESHOT)
ka->sa.sa_handler = SIG_DFL;
...
...@@ -2,7 +2,11 @@ ENTRY(sys_call_table)
.long sys_restart_syscall /* 0 - old "setup()" system call,
* used for restarting */
.long sys_exit
#ifdef CONFIG_MMU
.long sys_fork_wrapper
#else
.long sys_ni_syscall
#endif
.long sys_read
.long sys_write
.long sys_open /* 5 */
...
...@@ -22,14 +22,6 @@ void trap_init(void)
__enable_hw_exceptions();
}
void __bad_xchg(volatile void *ptr, int size)
{
printk(KERN_INFO "xchg: bad data size: pc 0x%p, ptr 0x%p, size %d\n",
__builtin_return_address(0), ptr, size);
BUG();
}
EXPORT_SYMBOL(__bad_xchg);
static int kstack_depth_to_print = 24;
static int __init kstack_setup(char *s)
...@@ -105,3 +97,37 @@ void dump_stack(void)
show_stack(NULL, NULL);
}
EXPORT_SYMBOL(dump_stack);
#ifdef CONFIG_MMU
void __bug(const char *file, int line, void *data)
{
if (data)
printk(KERN_CRIT "kernel BUG at %s:%d (data = %p)!\n",
file, line, data);
else
printk(KERN_CRIT "kernel BUG at %s:%d!\n", file, line);
machine_halt();
}
int bad_trap(int trap_num, struct pt_regs *regs)
{
printk(KERN_CRIT
"unimplemented trap %d called at 0x%08lx, pid %d!\n",
trap_num, regs->pc, current->pid);
return -ENOSYS;
}
int debug_trap(struct pt_regs *regs)
{
int i;
printk(KERN_CRIT "debug trap\n");
for (i = 0; i < 32; i++) {
/* printk("r%i:%08X\t",i,regs->gpr[i]); */
if ((i % 4) == 3)
printk(KERN_CRIT "\n");
}
printk(KERN_CRIT "pc:%08lX\tmsr:%08lX\n", regs->pc, regs->msr);
return -ENOSYS;
}
#endif
...@@ -17,8 +17,7 @@ ENTRY(_start)
jiffies = jiffies_64 + 4;
SECTIONS {
. = CONFIG_KERNEL_START;
.text : {
_text = . ;
_stext = . ;
...@@ -132,6 +131,8 @@ SECTIONS {
__con_initcall_end = .;
}
SECURITY_INIT
__init_end_before_initramfs = .;
.init.ramfs ALIGN(4096) : {
...
...@@ -10,4 +10,5 @@ else
lib-y += memcpy.o memmove.o
endif
lib-$(CONFIG_NO_MMU) += uaccess.o
lib-$(CONFIG_MMU) += uaccess_old.o
...@@ -32,9 +32,10 @@
/* Revised by Kenneth Albanowski for m68knommu. Basic problem: unaligned access
kills, so most of the assembly has to go. */
#include <net/checksum.h>
#include <asm/checksum.h>
#include <linux/module.h>
#include <net/checksum.h>
#include <asm/byteorder.h>
static inline unsigned short from32to16(unsigned long x)
{
...@@ -102,6 +103,7 @@ __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
return (__force __sum16)~do_csum(iph, ihl*4);
}
EXPORT_SYMBOL(ip_fast_csum);
/*
* computes the checksum of a memory block at buff, length len,
...@@ -115,15 +117,16 @@ __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
*
* it's best to have buff aligned on a 32-bit boundary
*/
__wsum csum_partial(const void *buff, int len, __wsum wsum)
{
unsigned int sum = (__force unsigned int)wsum;
unsigned int result = do_csum(buff, len);
/* add in old sum, and carry.. */
result += sum;
if (sum > result)
result += 1;
return (__force __wsum)result;
}
EXPORT_SYMBOL(csum_partial);
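The add-and-carry step above is ones'-complement arithmetic: if the 32-bit addition wraps, the carry must be folded back in. A self-contained C illustration (not code from the patch):

static unsigned int add_with_end_around_carry(unsigned int a, unsigned int b)
{
        unsigned int r = a + b;

        return r + (r < a);     /* r < a exactly when the addition wrapped */
}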
...@@ -131,9 +134,9 @@ EXPORT_SYMBOL(csum_partial);
* this routine is used for miscellaneous IP-like checksums, mainly
* in icmp.c
*/
__sum16 ip_compute_csum(const void *buff, int len)
{
return (__force __sum16)~do_csum(buff, len);
}
EXPORT_SYMBOL(ip_compute_csum);
...@@ -141,12 +144,18 @@ EXPORT_SYMBOL(ip_compute_csum);
* copy from fs while checksumming, otherwise like csum_partial
*/
__wsum
csum_partial_copy_from_user(const void __user *src, void *dst, int len,
__wsum sum, int *csum_err)
{
int missing;
missing = __copy_from_user(dst, src, len);
if (missing) {
memset(dst + len - missing, 0, missing);
*csum_err = -EFAULT;
} else
*csum_err = 0;
return csum_partial(dst, len, sum);
}
EXPORT_SYMBOL(csum_partial_copy_from_user);
...@@ -155,7 +164,7 @@ EXPORT_SYMBOL(csum_partial_copy_from_user);
* copy from ds while checksumming, otherwise like csum_partial
*/
__wsum
csum_partial_copy(const void *src, void *dst, int len, __wsum sum)
{
memcpy(dst, src, len);
return csum_partial(dst, len, sum);
...
...@@ -154,8 +154,3 @@ void *memcpy(void *v_dst, const void *v_src, __kernel_size_t c)
}
EXPORT_SYMBOL(memcpy);
#endif /* __HAVE_ARCH_MEMCPY */
void *cacheable_memcpy(void *d, const void *s, __kernel_size_t c)
{
return memcpy(d, s, c);
}
/*
* Copyright (C) 2009 Michal Simek <monstr@monstr.eu>
* Copyright (C) 2009 PetaLogix
* Copyright (C) 2007 LynuxWorks, Inc.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/errno.h>
#include <linux/linkage.h>
/*
* int __strncpy_user(char *to, char *from, int len);
*
* Returns:
* -EFAULT for an exception
* len if we hit the buffer limit
* bytes copied
*/
.text
.globl __strncpy_user;
.align 4;
__strncpy_user:
/*
* r5 - to
* r6 - from
* r7 - len
* r3 - temp count
* r4 - temp val
*/
addik r3,r7,0 /* temp_count = len */
beqi r3,3f
1:
lbu r4,r6,r0
sb r4,r5,r0
addik r3,r3,-1
beqi r3,2f /* break on len */
addik r5,r5,1
bneid r4,1b
addik r6,r6,1 /* delay slot */
addik r3,r3,1 /* undo "temp_count--" */
2:
rsubk r3,r3,r7 /* temp_count = len - temp_count */
3:
rtsd r15,8
nop
.section .fixup, "ax"
.align 2
4:
brid 3b
addik r3,r0, -EFAULT
.section __ex_table, "a"
.word 1b,4b
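A hypothetical caller, only to illustrate the return contract documented above (the wrapper name and buffer size are made up; the __user annotation is dropped to keep the sketch self-contained):

extern long __strncpy_user(char *to, const char *from, long len);

long copy_name(char *buf, const char *uname)   /* uname: user-space pointer */
{
        long ret = __strncpy_user(buf, uname, 32);

        /* -EFAULT on a fault, 32 if the limit was hit, else bytes copied */
        return ret;
}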
/*
* int __strnlen_user(char __user *str, int maxlen);
*
* Returns:
* 0 on error
* maxlen + 1 if no NUL byte found within maxlen bytes
* size of the string (including NUL byte)
*/
.text
.globl __strnlen_user;
.align 4;
__strnlen_user:
addik r3,r6,0
beqi r3,3f
1:
lbu r4,r5,r0
beqid r4,2f /* break on NUL */
addik r3,r3,-1 /* delay slot */
bneid r3,1b
addik r5,r5,1 /* delay slot */
addik r3,r3,-1 /* for break on len */
2:
rsubk r3,r3,r6
3:
rtsd r15,8
nop
.section .fixup,"ax"
4:
brid 3b
addk r3,r0,r0
.section __ex_table,"a"
.word 1b,4b
/*
* int __copy_tofrom_user(char *to, char *from, int len)
* Return:
* 0 on success
* number of not copied bytes on error
*/
.text
.globl __copy_tofrom_user;
.align 4;
__copy_tofrom_user:
/*
* r5 - to
* r6 - from
* r7, r3 - count
* r4 - tempval
*/
addik r3,r7,0
beqi r3,3f
1:
lbu r4,r6,r0
addik r6,r6,1
2:
sb r4,r5,r0
addik r3,r3,-1
bneid r3,1b
addik r5,r5,1 /* delay slot */
3:
rtsd r15,8
nop
.section __ex_table,"a"
.word 1b,3b,2b,3b
...@@ -3,3 +3,5 @@
#
obj-y := init.o
obj-$(CONFIG_MMU) += pgtable.o mmu_context.o fault.o
/*
* arch/microblaze/mm/fault.c
*
* Copyright (C) 2007 Xilinx, Inc. All rights reserved.
*
* Derived from "arch/ppc/mm/fault.c"
* Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
*
* Derived from "arch/i386/mm/fault.c"
* Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
*
* Modified by Cort Dougan and Paul Mackerras.
*
* This file is subject to the terms and conditions of the GNU General
* Public License. See the file COPYING in the main directory of this
* archive for more details.
*
*/
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/system.h>
#include <linux/uaccess.h>
#include <asm/exceptions.h>
#if defined(CONFIG_KGDB)
int debugger_kernel_faults = 1;
#endif
static unsigned long pte_misses; /* updated by do_page_fault() */
static unsigned long pte_errors; /* updated by do_page_fault() */
/*
* Check whether the instruction at regs->pc is a store using
* an update addressing form which will update r1.
*/
static int store_updates_sp(struct pt_regs *regs)
{
unsigned int inst;
if (get_user(inst, (unsigned int *)regs->pc))
return 0;
/* check for 1 in the rD field */
if (((inst >> 21) & 0x1f) != 1)
return 0;
/* check for store opcodes */
if ((inst & 0xd0000000) == 0xd0000000)
return 1;
return 0;
}
/*
* bad_page_fault is called when we have a bad access from the kernel.
* It is called from do_page_fault above and from some of the procedures
* in traps.c.
*/
static void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
{
const struct exception_table_entry *fixup;
/* MS: no context */
/* Are we prepared to handle this fault? */
fixup = search_exception_tables(regs->pc);
if (fixup) {
regs->pc = fixup->fixup;
return;
}
/* kernel has accessed a bad area */
#if defined(CONFIG_KGDB)
if (debugger_kernel_faults)
debugger(regs);
#endif
die("kernel access of bad area", regs, sig);
}
/*
* The error_code parameter is ESR for a data fault,
* 0 for an instruction fault.
*/
void do_page_fault(struct pt_regs *regs, unsigned long address,
unsigned long error_code)
{
struct vm_area_struct *vma;
struct mm_struct *mm = current->mm;
siginfo_t info;
int code = SEGV_MAPERR;
int is_write = error_code & ESR_S;
int fault;
regs->ear = address;
regs->esr = error_code;
/* On a kernel SLB miss we can only check for a valid exception entry */
if (kernel_mode(regs) && (address >= TASK_SIZE)) {
printk(KERN_WARNING "kernel task_size exceed");
_exception(SIGSEGV, regs, code, address);
}
/* for instr TLB miss and instr storage exception ESR_S is undefined */
if ((error_code & 0x13) == 0x13 || (error_code & 0x11) == 0x11)
is_write = 0;
#if defined(CONFIG_KGDB)
if (debugger_fault_handler && regs->trap == 0x300) {
debugger_fault_handler(regs);
return;
}
#endif /* CONFIG_KGDB */
if (in_atomic() || mm == NULL) {
/* FIXME */
if (kernel_mode(regs)) {
printk(KERN_EMERG
"Page fault in kernel mode - Oooou!!! pid %d\n",
current->pid);
_exception(SIGSEGV, regs, code, address);
return;
}
/* in_atomic() in user mode is really bad,
as is current->mm == NULL. */
printk(KERN_EMERG "Page fault in user mode with "
"in_atomic(), mm = %p\n", mm);
printk(KERN_EMERG "r15 = %lx MSR = %lx\n",
regs->r15, regs->msr);
die("Weird page fault", regs, SIGSEGV);
}
/* When running in the kernel we expect faults to occur only to
* addresses in user space. All other faults represent errors in the
* kernel and should generate an OOPS. Unfortunately, in the case of an
* erroneous fault occurring in a code path which already holds mmap_sem
* we will deadlock attempting to validate the fault against the
* address space. Luckily the kernel only validly references user
* space from well defined areas of code, which are listed in the
* exceptions table.
*
* As the vast majority of faults will be valid we will only perform
* the source reference check when there is a possibility of a deadlock.
* Attempt to lock the address space, if we cannot we then validate the
* source. If this is invalid we can skip the address space check,
* thus avoiding the deadlock.
*/
if (!down_read_trylock(&mm->mmap_sem)) {
if (kernel_mode(regs) && !search_exception_tables(regs->pc))
goto bad_area_nosemaphore;
down_read(&mm->mmap_sem);
}
vma = find_vma(mm, address);
if (!vma)
goto bad_area;
if (vma->vm_start <= address)
goto good_area;
if (!(vma->vm_flags & VM_GROWSDOWN))
goto bad_area;
if (!is_write)
goto bad_area;
/*
* N.B. The ABI allows programs to access up to
* a few hundred bytes below the stack pointer (TBD).
* The kernel signal delivery code writes up to about 1.5kB
* below the stack pointer (r1) before decrementing it.
* The exec code can write slightly over 640kB to the stack
* before setting the user r1. Thus we allow the stack to
* expand to 1MB without further checks.
*/
if (address + 0x100000 < vma->vm_end) {
/* get user regs even if this fault is in kernel mode */
struct pt_regs *uregs = current->thread.regs;
if (uregs == NULL)
goto bad_area;
/*
* A user-mode access to an address a long way below
* the stack pointer is only valid if the instruction
* is one which would update the stack pointer to the
* address accessed if the instruction completed,
* i.e. either stwu rs,n(r1) or stwux rs,r1,rb
* (or the byte, halfword, float or double forms).
*
* If we don't check this then any write to the area
* between the last mapped region and the stack will
* expand the stack rather than segfaulting.
*/
if (address + 2048 < uregs->r1
&& (kernel_mode(regs) || !store_updates_sp(regs)))
goto bad_area;
}
if (expand_stack(vma, address))
goto bad_area;
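Concretely, with hypothetical numbers: if the user stack pointer r1 is 0x40000000, a faulting store to 0x3ffff900 lies within 2048 bytes below r1 and may grow the stack, while a store to 0x3ff00000 may not, unless the faulting instruction itself updates r1 (store_updates_sp()):

unsigned long r1 = 0x40000000;                  /* user stack pointer       */
int may_grow_near = (0x3ffff900 + 2048 >= r1);  /* 1: allowed to grow       */
int may_grow_far  = (0x3ff00000 + 2048 >= r1);  /* 0: only if the store     */
                                                /* itself updates r1        */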
good_area:
code = SEGV_ACCERR;
/* a write */
if (is_write) {
if (!(vma->vm_flags & VM_WRITE))
goto bad_area;
/* a read */
} else {
/* protection fault */
if (error_code & 0x08000000)
goto bad_area;
if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
goto bad_area;
}
/*
* If for any reason at all we couldn't handle the fault,
* make sure we exit gracefully rather than endlessly redo
* the fault.
*/
survive:
fault = handle_mm_fault(mm, vma, address, is_write);
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
}
if (fault & VM_FAULT_MAJOR)
current->maj_flt++;
else
current->min_flt++;
up_read(&mm->mmap_sem);
/*
* keep track of tlb+htab misses that are good addrs but
* just need pte's created via handle_mm_fault()
* -- Cort
*/
pte_misses++;
return;
bad_area:
up_read(&mm->mmap_sem);
bad_area_nosemaphore:
pte_errors++;
/* User mode accesses cause a SIGSEGV */
if (user_mode(regs)) {
_exception(SIGSEGV, regs, code, address);
/* info.si_signo = SIGSEGV;
info.si_errno = 0;
info.si_code = code;
info.si_addr = (void *) address;
force_sig_info(SIGSEGV, &info, current);*/
return;
}
bad_page_fault(regs, address, SIGSEGV);
return;
/*
* We ran out of memory, or some other thing happened to us that made
* us unable to handle the page fault gracefully.
*/
out_of_memory:
if (current->pid == 1) {
yield();
down_read(&mm->mmap_sem);
goto survive;
}
up_read(&mm->mmap_sem);
printk(KERN_WARNING "VM: killing process %s\n", current->comm);
if (user_mode(regs))
do_exit(SIGKILL);
bad_page_fault(regs, address, SIGKILL);
return;
do_sigbus:
up_read(&mm->mmap_sem);
if (user_mode(regs)) {
info.si_signo = SIGBUS;
info.si_errno = 0;
info.si_code = BUS_ADRERR;
info.si_addr = (void __user *)address;
force_sig_info(SIGBUS, &info, current);
return;
}
bad_page_fault(regs, address, SIGBUS);
}
...@@ -23,8 +23,16 @@
#include <asm/sections.h>
#include <asm/tlb.h>
#ifndef CONFIG_MMU
unsigned int __page_offset;
EXPORT_SYMBOL(__page_offset);
#else
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
int mem_init_done;
static int init_bootmem_done;
#endif /* CONFIG_MMU */
char *klimit = _end;
...@@ -32,28 +40,26 @@ char *klimit = _end;
* Initialize the bootmem system and give it all the memory we
* have available.
*/
unsigned long memory_start;
unsigned long memory_end; /* due to mm/nommu.c */
unsigned long memory_size;
/*
* paging_init() sets up the page tables - in fact we've already done this.
*/
static void __init paging_init(void)
{
int i;
unsigned long zones_size[MAX_NR_ZONES];
/* Clean every zone */
memset(zones_size, 0, sizeof(zones_size));
/*
* old: we can DMA to/from any address. Put all pages into ZONE_DMA.
* We use only ZONE_NORMAL
*/
zones_size[ZONE_NORMAL] = max_mapnr;
/* every other zones are empty */
for (i = 1; i < MAX_NR_ZONES; i++)
zones_size[i] = 0;
free_area_init(zones_size);
}
...@@ -61,6 +67,7 @@ void __init setup_memory(void)
{
int i;
unsigned long map_size;
#ifndef CONFIG_MMU
u32 kernel_align_start, kernel_align_size;
/* Find the main memory where the kernel is */
...@@ -93,6 +100,7 @@ void __init setup_memory(void)
__func__, kernel_align_start, kernel_align_start __func__, kernel_align_start, kernel_align_start
+ kernel_align_size, kernel_align_size); + kernel_align_size, kernel_align_size);
#endif
/*
* Kernel:
* start: base phys address of kernel - page align
@@ -121,9 +129,13 @@ void __init setup_memory(void)
* for 4GB of memory, using 4kB pages), plus 1 page
* (in case the address isn't page-aligned).
*/
#ifndef CONFIG_MMU
map_size = init_bootmem_node(NODE_DATA(0), PFN_UP(TOPHYS((u32)_end)),
min_low_pfn, max_low_pfn);
#else
map_size = init_bootmem_node(&contig_page_data,
PFN_UP(TOPHYS((u32)_end)), min_low_pfn, max_low_pfn);
#endif
lmb_reserve(PFN_UP(TOPHYS((u32)_end)) << PAGE_SHIFT, map_size);
/* free bootmem is whole main memory */
@@ -137,6 +149,9 @@ void __init setup_memory(void)
reserve_bootmem(lmb.reserved.region[i].base,
lmb_size_bytes(&lmb.reserved, i) - 1, BOOTMEM_DEFAULT);
}
#ifdef CONFIG_MMU
init_bootmem_done = 1;
#endif
paging_init();
}
@@ -191,11 +206,145 @@ void __init mem_init(void)
printk(KERN_INFO "Memory: %luk/%luk available\n",
(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
num_physpages << (PAGE_SHIFT-10));
#ifdef CONFIG_MMU
mem_init_done = 1;
#endif
}
#ifndef CONFIG_MMU
/* Check against bounds of physical memory */
int ___range_ok(unsigned long addr, unsigned long size)
{
return ((addr < memory_start) ||
((addr + size) > memory_end));
}
EXPORT_SYMBOL(___range_ok);
#else
int page_is_ram(unsigned long pfn)
{
return pfn < max_low_pfn;
}
/*
* Check for command-line options that affect what MMU_init will do.
*/
static void mm_cmdline_setup(void)
{
unsigned long maxmem = 0;
char *p = cmd_line;
/* Look for mem= option on command line */
p = strstr(cmd_line, "mem=");
if (p) {
p += 4;
maxmem = memparse(p, &p);
if (maxmem && memory_size > maxmem) {
memory_size = maxmem;
memory_end = memory_start + memory_size;
lmb.memory.region[0].size = memory_size;
}
}
}
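The mem= handling above relies on memparse(), which accepts a number with an optional K/M/G suffix. A rough user-space sketch of that behavior (parse_mem is a hypothetical stand-in for illustration, not the kernel routine):

#include <stdio.h>
#include <stdlib.h>

static unsigned long long parse_mem(const char *p)
{
	char *end;
	unsigned long long v = strtoull(p, &end, 0);

	/* each suffix multiplies by another factor of 1024 */
	switch (*end) {
	case 'G': case 'g': v <<= 10; /* fall through */
	case 'M': case 'm': v <<= 10; /* fall through */
	case 'K': case 'k': v <<= 10;
	}
	return v;
}

int main(void)
{
	/* "mem=64M" on the command line caps memory_size at 64MB */
	printf("%llu\n", parse_mem("64M"));	/* prints 67108864 */
	return 0;
}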
/*
* MMU_init_hw does the chip-specific initialization of the MMU hardware.
*/
static void __init mmu_init_hw(void)
{
/*
* The Zone Protection Register (ZPR) defines how protection will
* be applied to every page which is a member of a given zone. At
* present, we utilize only two of the zones.
* The zone index bits (of ZSEL) in the PTE are used for software
* indicators, except the LSB. For user access, zone 1 is used,
* for kernel access, zone 0 is used. We set all but zone 1
* to zero, allowing only kernel access as indicated in the PTE.
* For zone 1, we set a 01 binary (a value of 10 will not work)
* to allow user access as indicated in the PTE. This also allows
* kernel access as indicated in the PTE.
*/
__asm__ __volatile__ ("ori r11, r0, 0x10000000;" \
"mts rzpr, r11;"
: : : "r11");
}
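To make the magic constant concrete: assuming the ZPR packs 16 two-bit zone fields with zone 0 in the two most-significant bits (which is what the comment and the value imply), 0x10000000 is exactly zone 1 = 0b01 with every other zone 0b00. A small sketch:

#include <stdio.h>

/* place a 2-bit protection value into the field for the given zone */
static unsigned long zpr_field(unsigned int zone, unsigned int bits)
{
	return (unsigned long)(bits & 3) << (30 - 2 * zone);
}

int main(void)
{
	unsigned long zpr = zpr_field(1, 0x1);	/* user zone 1 -> 01 */
	printf("0x%08lx\n", zpr);		/* prints 0x10000000 */
	return 0;
}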
/*
* MMU_init sets up the basic memory mappings for the kernel,
* including both RAM and possibly some I/O regions,
* and sets up the page tables and the MMU hardware ready to go.
*/
/* called from head.S */
asmlinkage void __init mmu_init(void)
{
unsigned int kstart, ksize;
if (!lmb.reserved.cnt) {
printk(KERN_EMERG "Error memory count\n");
machine_restart(NULL);
}
if ((u32) lmb.memory.region[0].size < 0x1000000) {
printk(KERN_EMERG "Memory must be greater than 16MB\n");
machine_restart(NULL);
}
/* Find main memory where the kernel is */
memory_start = (u32) lmb.memory.region[0].base;
memory_end = (u32) lmb.memory.region[0].base +
(u32) lmb.memory.region[0].size;
memory_size = memory_end - memory_start;
mm_cmdline_setup(); /* FIXME parse args from command line - not used */
/*
* Map out the kernel text/data/bss from the available physical
* memory.
*/
kstart = __pa(CONFIG_KERNEL_START); /* kernel start */
/* kernel size */
ksize = PAGE_ALIGN(((u32)_end - (u32)CONFIG_KERNEL_START));
lmb_reserve(kstart, ksize);
#if defined(CONFIG_BLK_DEV_INITRD)
/* Remove the init RAM disk from the available memory. */
/* if (initrd_start) {
mem_pieces_remove(&phys_avail, __pa(initrd_start),
initrd_end - initrd_start, 1);
}*/
#endif /* CONFIG_BLK_DEV_INITRD */
/* Initialize the MMU hardware */
mmu_init_hw();
/* Map in all of RAM starting at CONFIG_KERNEL_START */
mapin_ram();
#ifdef CONFIG_HIGHMEM_START_BOOL
ioremap_base = CONFIG_HIGHMEM_START;
#else
ioremap_base = 0xfe000000UL; /* for now, could be 0xfffff000 */
#endif /* CONFIG_HIGHMEM_START_BOOL */
ioremap_bot = ioremap_base;
/* Initialize the context management stuff */
mmu_context_init();
}
/* This is only called until mem_init is done. */
void __init *early_get_page(void)
{
void *p;
if (init_bootmem_done) {
p = alloc_bootmem_pages(PAGE_SIZE);
} else {
/*
* The limit is memory start + 32MB because of the
* memory mapping set up in head.S
*/
p = __va(lmb_alloc_base(PAGE_SIZE, PAGE_SIZE,
memory_start + 0x2000000));
}
return p;
}
#endif /* CONFIG_MMU */
/*
* This file contains the routines for handling the MMU.
*
* Copyright (C) 2007 Xilinx, Inc. All rights reserved.
*
* Derived from arch/ppc/mm/4xx_mmu.c:
* -- paulus
*
* Derived from arch/ppc/mm/init.c:
* Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
*
* Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
* and Cort Dougan (PReP) (cort@cs.nmt.edu)
* Copyright (C) 1996 Paul Mackerras
* Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
*
* Derived from "arch/i386/mm/init.c"
* Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
*/
#include <linux/mm.h>
#include <linux/init.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
mm_context_t next_mmu_context;
unsigned long context_map[LAST_CONTEXT / BITS_PER_LONG + 1];
atomic_t nr_free_contexts;
struct mm_struct *context_mm[LAST_CONTEXT+1];
/*
* Initialize the context management stuff.
*/
void __init mmu_context_init(void)
{
/*
* The use of context zero is reserved for the kernel.
* This code assumes FIRST_CONTEXT < 32.
*/
context_map[0] = (1 << FIRST_CONTEXT) - 1;
next_mmu_context = FIRST_CONTEXT;
atomic_set(&nr_free_contexts, LAST_CONTEXT - FIRST_CONTEXT + 1);
}
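The bitmap initialization is just (1 << FIRST_CONTEXT) - 1, which sets bits 0..FIRST_CONTEXT-1 and so permanently reserves those contexts for the kernel. A sketch, assuming FIRST_CONTEXT is 1 (illustrative only):

#include <stdio.h>

int main(void)
{
	unsigned int first_context = 1;	/* assumed value for illustration */
	unsigned long map0 = (1UL << first_context) - 1;

	/* bit 0 set -> context 0 marked in use from the start */
	printf("context_map[0] = 0x%lx\n", map0);	/* prints 0x1 */
	return 0;
}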
/*
* Steal a context from a task that has one at the moment.
*
* This isn't an LRU system, it just frees up each context in
* turn (sort-of pseudo-random replacement :). This would be the
* place to implement an LRU scheme if anyone were motivated to do it.
*/
void steal_context(void)
{
struct mm_struct *mm;
/* free up context `next_mmu_context' */
/* if we shouldn't free context 0, don't... */
if (next_mmu_context < FIRST_CONTEXT)
next_mmu_context = FIRST_CONTEXT;
mm = context_mm[next_mmu_context];
flush_tlb_mm(mm);
destroy_context(mm);
}
/*
* This file contains the routines setting up the linux page tables.
*
* Copyright (C) 2008 Michal Simek
* Copyright (C) 2008 PetaLogix
*
* Copyright (C) 2007 Xilinx, Inc. All rights reserved.
*
* Derived from arch/ppc/mm/pgtable.c:
* -- paulus
*
* Derived from arch/ppc/mm/init.c:
* Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
*
* Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
* and Cort Dougan (PReP) (cort@cs.nmt.edu)
* Copyright (C) 1996 Paul Mackerras
* Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
*
* Derived from "arch/i386/mm/init.c"
* Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
*
* This file is subject to the terms and conditions of the GNU General
* Public License. See the file COPYING in the main directory of this
* archive for more details.
*
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <linux/io.h>
#include <asm/mmu.h>
#include <asm/sections.h>
#define flush_HPTE(X, va, pg) _tlbie(va)
unsigned long ioremap_base;
unsigned long ioremap_bot;
/* The maximum lowmem defaults to 768MB, but this can be configured to
* another value.
*/
#define MAX_LOW_MEM CONFIG_LOWMEM_SIZE
#ifndef CONFIG_SMP
struct pgtable_cache_struct quicklists;
#endif
static void __iomem *__ioremap(phys_addr_t addr, unsigned long size,
unsigned long flags)
{
unsigned long v, i;
phys_addr_t p;
int err;
/*
* Choose an address to map it to.
* Once the vmalloc system is running, we use it.
* Before then, we use space going down from ioremap_base
* (ioremap_bot records where we're up to).
*/
p = addr & PAGE_MASK;
size = PAGE_ALIGN(addr + size) - p;
/*
* Don't allow anybody to remap normal RAM that we're using.
* mem_init() sets high_memory so only do the check after that.
*
* However, allow remap of rootfs: TBD
*/
if (mem_init_done &&
p >= memory_start && p < virt_to_phys(high_memory) &&
!(p >= virt_to_phys((unsigned long)&__bss_stop) &&
p < virt_to_phys((unsigned long)__bss_stop))) {
printk(KERN_WARNING "__ioremap(): phys addr "PTE_FMT
" is RAM lr %p\n", (unsigned long)p,
__builtin_return_address(0));
return NULL;
}
if (size == 0)
return NULL;
/*
* Is it already mapped? If the whole area is mapped then we're
* done, otherwise remap it since we want to keep the virt addrs for
* each request contiguous.
*
* We make the assumption here that if the bottom and top
* of the range we want are mapped then it's mapped to the
* same virt address (and this is contiguous).
* -- Cort
*/
if (mem_init_done) {
struct vm_struct *area;
area = get_vm_area(size, VM_IOREMAP);
if (area == NULL)
return NULL;
v = VMALLOC_VMADDR(area->addr);
} else {
v = (ioremap_bot -= size);
}
if ((flags & _PAGE_PRESENT) == 0)
flags |= _PAGE_KERNEL;
if (flags & _PAGE_NO_CACHE)
flags |= _PAGE_GUARDED;
err = 0;
for (i = 0; i < size && err == 0; i += PAGE_SIZE)
err = map_page(v + i, p + i, flags);
if (err) {
if (mem_init_done)
vfree((void *)v);
return NULL;
}
return (void __iomem *) (v + ((unsigned long)addr & ~PAGE_MASK));
}
void __iomem *ioremap(phys_addr_t addr, unsigned long size)
{
return __ioremap(addr, size, _PAGE_NO_CACHE);
}
EXPORT_SYMBOL(ioremap);
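Typical use of the exported helper, sketched as a fragment with a hypothetical device at physical 0x84000000 (the address and size are illustrative, not from this patch):

void __iomem *regs = ioremap(0x84000000, 0x1000);
if (regs) {
	u32 id = readl(regs);	/* uncached access; __ioremap applied _PAGE_NO_CACHE */
	(void)id;
	iounmap(regs);
}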
void iounmap(void *addr)
{
if (addr > high_memory && (unsigned long) addr < ioremap_bot)
vfree((void *) (PAGE_MASK & (unsigned long) addr));
}
EXPORT_SYMBOL(iounmap);
int map_page(unsigned long va, phys_addr_t pa, int flags)
{
pmd_t *pd;
pte_t *pg;
int err = -ENOMEM;
/* spin_lock(&init_mm.page_table_lock); */
/* Use upper 10 bits of VA to index the first level map */
pd = pmd_offset(pgd_offset_k(va), va);
/* Use middle 10 bits of VA to index the second-level map */
pg = pte_alloc_kernel(pd, va); /* from powerpc - pgtable.c */
/* pg = pte_alloc_kernel(&init_mm, pd, va); */
if (pg != NULL) {
err = 0;
set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT,
__pgprot(flags)));
if (mem_init_done)
flush_HPTE(0, va, pmd_val(*pd));
/* flush_HPTE(0, va, pg); */
}
/* spin_unlock(&init_mm.page_table_lock); */
return err;
}
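With a 32-bit virtual address and 4kB pages this is the classic two-level split the comments describe: bits 31:22 index the first-level map, bits 21:12 index the PTE page, and bits 11:0 are the page offset. For example, va 0xc0001234 gives first-level index 0x300, PTE index 0x001, offset 0x234.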
void __init adjust_total_lowmem(void)
{
/* TBD */
#if 0
unsigned long max_low_mem = MAX_LOW_MEM;
if (total_lowmem > max_low_mem) {
total_lowmem = max_low_mem;
#ifndef CONFIG_HIGHMEM
printk(KERN_INFO "Warning, memory limited to %ld Mb, use "
"CONFIG_HIGHMEM to reach %ld Mb\n",
max_low_mem >> 20, total_memory >> 20);
total_memory = total_lowmem;
#endif /* CONFIG_HIGHMEM */
}
#endif
}
static void show_tmem(unsigned long tmem)
{
volatile unsigned long a;
a = tmem; /* volatile dummy store; the original "a = a + tmem" read 'a' uninitialized */
}
/*
* Map in all of physical memory starting at CONFIG_KERNEL_START.
*/
void __init mapin_ram(void)
{
unsigned long v, p, s, f;
v = CONFIG_KERNEL_START;
p = memory_start;
show_tmem(memory_size);
for (s = 0; s < memory_size; s += PAGE_SIZE) {
f = _PAGE_PRESENT | _PAGE_ACCESSED |
_PAGE_SHARED | _PAGE_HWEXEC;
if ((char *) v < _stext || (char *) v >= _etext)
f |= _PAGE_WRENABLE;
else
/* On the MicroBlaze, no user access
forces R/W kernel access */
f |= _PAGE_USER;
map_page(v, p, f);
v += PAGE_SIZE;
p += PAGE_SIZE;
}
}
/* is x a power of 2? */
#define is_power_of_2(x) ((x) != 0 && (((x) & ((x) - 1)) == 0))
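Worked example of the bit trick: 12 is 0b1100 and 12 - 1 = 11 is 0b1011, so 12 & 11 = 0b1000 != 0 and 12 is not a power of two, while 8 & 7 = 0b1000 & 0b0111 = 0, so 8 is. Subtracting 1 flips the lowest set bit and everything below it, so the AND is zero exactly when a single bit was set.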
/*
* Set up a mapping for a block of I/O.
* virt, phys, size must all be page-aligned.
* This should only be called before ioremap is called.
*/
void __init io_block_mapping(unsigned long virt, phys_addr_t phys,
unsigned int size, int flags)
{
int i;
if (virt > CONFIG_KERNEL_START && virt < ioremap_bot)
ioremap_bot = ioremap_base = virt;
/* Put it in the page tables. */
for (i = 0; i < size; i += PAGE_SIZE)
map_page(virt + i, phys + i, flags);
}
/* Scan the real Linux page tables and return a PTE pointer for
* a virtual address in a context.
* Returns true (1) if PTE was found, zero otherwise. The pointer to
* the PTE pointer is unmodified if PTE is not found.
*/
static int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep)
{
pgd_t *pgd;
pmd_t *pmd;
pte_t *pte;
int retval = 0;
pgd = pgd_offset(mm, addr & PAGE_MASK);
if (pgd) {
pmd = pmd_offset(pgd, addr & PAGE_MASK);
if (pmd_present(*pmd)) {
pte = pte_offset_kernel(pmd, addr & PAGE_MASK);
if (pte) {
retval = 1;
*ptep = pte;
}
}
}
return retval;
}
/* Find physical address for this virtual address. Normally used by
* I/O functions, but anyone can call it.
*/
unsigned long iopa(unsigned long addr)
{
unsigned long pa;
pte_t *pte;
struct mm_struct *mm;
/* Allow mapping of user addresses (within the thread)
* for DMA if necessary.
*/
if (addr < TASK_SIZE)
mm = current->mm;
else
mm = &init_mm;
pa = 0;
if (get_pteptr(mm, addr, &pte))
pa = (pte_val(*pte) & PAGE_MASK) | (addr & ~PAGE_MASK);
return pa;
}
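Worked example with illustrative numbers: with 4kB pages PAGE_MASK is 0xfffff000, so if get_pteptr() finds a PTE whose frame is physical 0x08004000 and addr is 0xc0001234, iopa() returns 0x08004000 | 0x234 = 0x08004234.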
@@ -438,7 +438,7 @@ source "drivers/s390/block/Kconfig"
config XILINX_SYSACE
tristate "Xilinx SystemACE support"
depends on 4xx || MICROBLAZE
help
Include support for the Xilinx SystemACE CompactFlash interface
...
@@ -906,7 +906,7 @@ config DTLK
config XILINX_HWICAP
tristate "Xilinx HWICAP Support"
depends on XILINX_VIRTEX || MICROBLAZE
help
This option enables support for Xilinx Internal Configuration
Access Port (ICAP) driver. The ICAP is used on Xilinx Virtex
...
@@ -69,7 +69,7 @@ comment "Memory mapped GPIO expanders:"
config GPIO_XILINX
bool "Xilinx GPIO support"
depends on PPC_OF || MICROBLAZE
help
Say yes here to support the Xilinx FPGA GPIO device
...
@@ -192,7 +192,7 @@ config SERIO_RAW
config SERIO_XILINX_XPS_PS2
tristate "Xilinx XPS PS/2 Controller Support"
depends on PPC || MICROBLAZE
help
This driver supports XPS PS/2 IP from the Xilinx EDK on
PowerPC platform.
...
config OF_DEVICE
def_bool y
depends on OF && (SPARC || PPC_OF || MICROBLAZE)
config OF_GPIO
def_bool y
depends on OF && (PPC_OF || MICROBLAZE) && GPIOLIB
help
OpenFirmware GPIO accessors
config OF_I2C
def_tristate I2C
depends on (PPC_OF || MICROBLAZE) && I2C
help
OpenFirmware I2C accessors
config OF_SPI
def_tristate SPI
depends on OF && (PPC_OF || MICROBLAZE) && SPI
help
OpenFirmware SPI accessors
@@ -212,7 +212,7 @@ config SPI_TXX9
config SPI_XILINX
tristate "Xilinx SPI controller"
depends on (XILINX_VIRTEX || MICROBLAZE) && EXPERIMENTAL
select SPI_BITBANG
help
This exposes the SPI controller IP from the Xilinx EDK.
...
@@ -22,6 +22,7 @@ config USB_ARCH_HAS_HCD
default y if PCMCIA && !M32R # sl811_cs
default y if ARM # SL-811
default y if SUPERH # r8a66597-hcd
default y if MICROBLAZE
default PCI
# many non-PCI SOC chips embed OHCI
...
@@ -1996,7 +1996,7 @@ config FB_PS3_DEFAULT_SIZE_M
config FB_XILINX
tristate "Xilinx frame buffer support"
depends on FB && (XILINX_VIRTEX || MICROBLAZE)
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
...