Commit 41884629 authored by Russell King

Merge branches 'clkdev', 'fixes', 'misc' and 'sa1100-base' into for-linus

......@@ -8,7 +8,6 @@ generic-y += early_ioremap.h
generic-y += emergency-restart.h
generic-y += errno.h
generic-y += exec.h
generic-y += export.h
generic-y += ioctl.h
generic-y += ipcbuf.h
generic-y += irq_regs.h
......
......@@ -9,6 +9,33 @@
#include <asm/memory.h>
#include <asm/param.h> /* HZ */
/*
* Loop (or tick) based delay:
*
* loops = loops_per_jiffy * jiffies_per_sec * delay_us / us_per_sec
*
* where:
*
* jiffies_per_sec = HZ
* us_per_sec = 1000000
*
* Therefore the constant part is HZ / 1000000 which is a small
* fractional number. To make this usable with integer math, we
* scale up this constant by 2^31, perform the actual multiplication,
* and scale the result back down by 2^31 with a simple shift:
*
* loops = (loops_per_jiffy * delay_us * UDELAY_MULT) >> 31
*
* where:
*
* UDELAY_MULT = 2^31 * HZ / 1000000
* = (2^31 / 1000000) * HZ
* = 2147.483648 * HZ
* = 2147 * HZ + 483648 * HZ / 1000000
*
* 31 is the biggest scale shift value that won't overflow 32 bits for
* delay_us * UDELAY_MULT assuming HZ <= 1000 and delay_us <= 2000.
*/
#define MAX_UDELAY_MS 2
#define UDELAY_MULT UL(2147 * HZ + 483648 * HZ / 1000000)
#define UDELAY_SHIFT 31
......
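A quick host-side check of the constant derivation above. This is not kernel code; HZ = 100 is an assumed example value and the names are illustrative:

#include <stdio.h>
#include <stdint.h>

#define HZ 100	/* assumed example; real kernels configure this */

int main(void)
{
	/* the split form from the header, kept within 32-bit integer math */
	uint32_t udelay_mult = 2147 * HZ + 483648 * HZ / 1000000;
	/* the exact 2^31 * HZ / 1000000, computed with 64-bit math */
	uint64_t exact = (((uint64_t)1 << 31) * HZ) / 1000000;

	printf("UDELAY_MULT=%u exact=%llu\n",
	       udelay_mult, (unsigned long long)exact);	/* both 214748 */
	return 0;
}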
......@@ -19,7 +19,7 @@
* This may need to be greater than __NR_last_syscall+1 in order to
* account for the padding in the syscall table
*/
#define __NR_syscalls (396)
#define __NR_syscalls (400)
#define __ARCH_WANT_STAT64
#define __ARCH_WANT_SYS_GETHOSTNAME
......
......@@ -420,6 +420,9 @@
#define __NR_copy_file_range (__NR_SYSCALL_BASE+391)
#define __NR_preadv2 (__NR_SYSCALL_BASE+392)
#define __NR_pwritev2 (__NR_SYSCALL_BASE+393)
#define __NR_pkey_mprotect (__NR_SYSCALL_BASE+394)
#define __NR_pkey_alloc (__NR_SYSCALL_BASE+395)
#define __NR_pkey_free (__NR_SYSCALL_BASE+396)
/*
* The following SWIs are ARM private.
......
......@@ -33,7 +33,7 @@ endif
obj-$(CONFIG_CPU_IDLE) += cpuidle.o
obj-$(CONFIG_ISA_DMA_API) += dma.o
obj-$(CONFIG_FIQ) += fiq.o fiqasm.o
obj-$(CONFIG_MODULES) += module.o
obj-$(CONFIG_MODULES) += armksyms.o module.o
obj-$(CONFIG_ARM_MODULE_PLTS) += module-plts.o
obj-$(CONFIG_ISA_DMA) += dma-isa.o
obj-$(CONFIG_PCI) += bios32.o isa.o
......
/*
* linux/arch/arm/kernel/armksyms.c
*
* Copyright (C) 2000 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/cryptohash.h>
#include <linux/delay.h>
#include <linux/in6.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/arm-smccc.h>
#include <asm/checksum.h>
#include <asm/ftrace.h>
/*
* libgcc functions - functions that are used internally by the
* compiler... (the prototypes are not correct, but that doesn't
* really matter since they're not versioned).
*/
extern void __ashldi3(void);
extern void __ashrdi3(void);
extern void __divsi3(void);
extern void __lshrdi3(void);
extern void __modsi3(void);
extern void __muldi3(void);
extern void __ucmpdi2(void);
extern void __udivsi3(void);
extern void __umodsi3(void);
extern void __do_div64(void);
extern void __bswapsi2(void);
extern void __bswapdi2(void);
extern void __aeabi_idiv(void);
extern void __aeabi_idivmod(void);
extern void __aeabi_lasr(void);
extern void __aeabi_llsl(void);
extern void __aeabi_llsr(void);
extern void __aeabi_lmul(void);
extern void __aeabi_uidiv(void);
extern void __aeabi_uidivmod(void);
extern void __aeabi_ulcmp(void);
extern void fpundefinstr(void);
void mmioset(void *, unsigned int, size_t);
void mmiocpy(void *, const void *, size_t);
/* platform dependent support */
EXPORT_SYMBOL(arm_delay_ops);
/* networking */
EXPORT_SYMBOL(csum_partial);
EXPORT_SYMBOL(csum_partial_copy_from_user);
EXPORT_SYMBOL(csum_partial_copy_nocheck);
EXPORT_SYMBOL(__csum_ipv6_magic);
/* io */
#ifndef __raw_readsb
EXPORT_SYMBOL(__raw_readsb);
#endif
#ifndef __raw_readsw
EXPORT_SYMBOL(__raw_readsw);
#endif
#ifndef __raw_readsl
EXPORT_SYMBOL(__raw_readsl);
#endif
#ifndef __raw_writesb
EXPORT_SYMBOL(__raw_writesb);
#endif
#ifndef __raw_writesw
EXPORT_SYMBOL(__raw_writesw);
#endif
#ifndef __raw_writesl
EXPORT_SYMBOL(__raw_writesl);
#endif
/* string / mem functions */
EXPORT_SYMBOL(strchr);
EXPORT_SYMBOL(strrchr);
EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(memmove);
EXPORT_SYMBOL(memchr);
EXPORT_SYMBOL(__memzero);
EXPORT_SYMBOL(mmioset);
EXPORT_SYMBOL(mmiocpy);
#ifdef CONFIG_MMU
EXPORT_SYMBOL(copy_page);
EXPORT_SYMBOL(arm_copy_from_user);
EXPORT_SYMBOL(arm_copy_to_user);
EXPORT_SYMBOL(arm_clear_user);
EXPORT_SYMBOL(__get_user_1);
EXPORT_SYMBOL(__get_user_2);
EXPORT_SYMBOL(__get_user_4);
EXPORT_SYMBOL(__get_user_8);
#ifdef __ARMEB__
EXPORT_SYMBOL(__get_user_64t_1);
EXPORT_SYMBOL(__get_user_64t_2);
EXPORT_SYMBOL(__get_user_64t_4);
EXPORT_SYMBOL(__get_user_32t_8);
#endif
EXPORT_SYMBOL(__put_user_1);
EXPORT_SYMBOL(__put_user_2);
EXPORT_SYMBOL(__put_user_4);
EXPORT_SYMBOL(__put_user_8);
#endif
/* gcc lib functions */
EXPORT_SYMBOL(__ashldi3);
EXPORT_SYMBOL(__ashrdi3);
EXPORT_SYMBOL(__divsi3);
EXPORT_SYMBOL(__lshrdi3);
EXPORT_SYMBOL(__modsi3);
EXPORT_SYMBOL(__muldi3);
EXPORT_SYMBOL(__ucmpdi2);
EXPORT_SYMBOL(__udivsi3);
EXPORT_SYMBOL(__umodsi3);
EXPORT_SYMBOL(__do_div64);
EXPORT_SYMBOL(__bswapsi2);
EXPORT_SYMBOL(__bswapdi2);
#ifdef CONFIG_AEABI
EXPORT_SYMBOL(__aeabi_idiv);
EXPORT_SYMBOL(__aeabi_idivmod);
EXPORT_SYMBOL(__aeabi_lasr);
EXPORT_SYMBOL(__aeabi_llsl);
EXPORT_SYMBOL(__aeabi_llsr);
EXPORT_SYMBOL(__aeabi_lmul);
EXPORT_SYMBOL(__aeabi_uidiv);
EXPORT_SYMBOL(__aeabi_uidivmod);
EXPORT_SYMBOL(__aeabi_ulcmp);
#endif
/* bitops */
EXPORT_SYMBOL(_set_bit);
EXPORT_SYMBOL(_test_and_set_bit);
EXPORT_SYMBOL(_clear_bit);
EXPORT_SYMBOL(_test_and_clear_bit);
EXPORT_SYMBOL(_change_bit);
EXPORT_SYMBOL(_test_and_change_bit);
EXPORT_SYMBOL(_find_first_zero_bit_le);
EXPORT_SYMBOL(_find_next_zero_bit_le);
EXPORT_SYMBOL(_find_first_bit_le);
EXPORT_SYMBOL(_find_next_bit_le);
#ifdef __ARMEB__
EXPORT_SYMBOL(_find_first_zero_bit_be);
EXPORT_SYMBOL(_find_next_zero_bit_be);
EXPORT_SYMBOL(_find_first_bit_be);
EXPORT_SYMBOL(_find_next_bit_be);
#endif
#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_OLD_MCOUNT
EXPORT_SYMBOL(mcount);
#endif
EXPORT_SYMBOL(__gnu_mcount_nc);
#endif
#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
EXPORT_SYMBOL(__pv_phys_pfn_offset);
EXPORT_SYMBOL(__pv_offset);
#endif
#ifdef CONFIG_HAVE_ARM_SMCCC
EXPORT_SYMBOL(arm_smccc_smc);
EXPORT_SYMBOL(arm_smccc_hvc);
#endif
......@@ -403,6 +403,9 @@
CALL(sys_copy_file_range)
CALL(sys_preadv2)
CALL(sys_pwritev2)
CALL(sys_pkey_mprotect)
/* 395 */ CALL(sys_pkey_alloc)
CALL(sys_pkey_free)
#ifndef syscalls_counted
.equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
#define syscalls_counted
......
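The .equ above rounds the table size up to a multiple of four entries, which is why __NR_syscalls moves from 396 to 400 once the three pkey calls land at 394-396 (highest number 396, so 397 entries, padded to 400). A standalone C illustration of the same arithmetic; the helper name is hypothetical:

#include <stdio.h>

/* mirrors ((NR_syscalls + 3) & ~3) - NR_syscalls from the table above */
static unsigned int syscall_padding(unsigned int nr)
{
	return ((nr + 3) & ~3u) - nr;
}

int main(void)
{
	printf("397 -> pad %u\n", syscall_padding(397));	/* 3: table is 400 */
	printf("400 -> pad %u\n", syscall_padding(400));	/* 0: already aligned */
	return 0;
}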
......@@ -7,7 +7,6 @@
#include <asm/assembler.h>
#include <asm/ftrace.h>
#include <asm/unwind.h>
#include <asm/export.h>
#include "entry-header.S"
......@@ -154,7 +153,6 @@ ENTRY(mcount)
__mcount _old
#endif
ENDPROC(mcount)
EXPORT_SYMBOL(mcount)
#ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(ftrace_caller_old)
......@@ -207,7 +205,6 @@ UNWIND(.fnstart)
#endif
UNWIND(.fnend)
ENDPROC(__gnu_mcount_nc)
EXPORT_SYMBOL(__gnu_mcount_nc)
#ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(ftrace_caller)
......
......@@ -22,7 +22,6 @@
#include <asm/memory.h>
#include <asm/thread_info.h>
#include <asm/pgtable.h>
#include <asm/export.h>
#if defined(CONFIG_DEBUG_LL) && !defined(CONFIG_DEBUG_SEMIHOSTING)
#include CONFIG_DEBUG_LL_INCLUDE
......@@ -728,8 +727,6 @@ __pv_phys_pfn_offset:
__pv_offset:
.quad 0
.size __pv_offset, . -__pv_offset
EXPORT_SYMBOL(__pv_phys_pfn_offset)
EXPORT_SYMBOL(__pv_offset)
#endif
#include "head-common.S"
......@@ -16,7 +16,6 @@
#include <asm/opcodes-sec.h>
#include <asm/opcodes-virt.h>
#include <asm/unwind.h>
#include <asm/export.h>
/*
* Wrap c macros in asm macros to delay expansion until after the
......@@ -52,7 +51,6 @@ UNWIND( .fnend)
ENTRY(arm_smccc_smc)
SMCCC SMCCC_SMC
ENDPROC(arm_smccc_smc)
EXPORT_SYMBOL(arm_smccc_smc)
/*
* void smccc_hvc(unsigned long a0, unsigned long a1, unsigned long a2,
......@@ -62,4 +60,3 @@ EXPORT_SYMBOL(arm_smccc_smc)
ENTRY(arm_smccc_hvc)
SMCCC SMCCC_HVC
ENDPROC(arm_smccc_hvc)
EXPORT_SYMBOL(arm_smccc_hvc)
......@@ -12,6 +12,7 @@
*/
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/init.h>
......@@ -21,7 +22,9 @@
#include <linux/of.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/topology.h>
......@@ -41,6 +44,7 @@
* updated during this sequence.
*/
static DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
static DEFINE_MUTEX(cpu_scale_mutex);
unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
{
......@@ -52,6 +56,65 @@ static void set_capacity_scale(unsigned int cpu, unsigned long capacity)
per_cpu(cpu_scale, cpu) = capacity;
}
#ifdef CONFIG_PROC_SYSCTL
static ssize_t cpu_capacity_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct cpu *cpu = container_of(dev, struct cpu, dev);
return sprintf(buf, "%lu\n",
arch_scale_cpu_capacity(NULL, cpu->dev.id));
}
static ssize_t cpu_capacity_store(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t count)
{
struct cpu *cpu = container_of(dev, struct cpu, dev);
int this_cpu = cpu->dev.id, i;
unsigned long new_capacity;
ssize_t ret;
if (count) {
ret = kstrtoul(buf, 0, &new_capacity);
if (ret)
return ret;
if (new_capacity > SCHED_CAPACITY_SCALE)
return -EINVAL;
mutex_lock(&cpu_scale_mutex);
for_each_cpu(i, &cpu_topology[this_cpu].core_sibling)
set_capacity_scale(i, new_capacity);
mutex_unlock(&cpu_scale_mutex);
}
return count;
}
static DEVICE_ATTR_RW(cpu_capacity);
static int register_cpu_capacity_sysctl(void)
{
int i;
struct device *cpu;
for_each_possible_cpu(i) {
cpu = get_cpu_device(i);
if (!cpu) {
pr_err("%s: too early to get CPU%d device!\n",
__func__, i);
continue;
}
device_create_file(cpu, &dev_attr_cpu_capacity);
}
return 0;
}
subsys_initcall(register_cpu_capacity_sysctl);
#endif
#ifdef CONFIG_OF
struct cpu_efficiency {
const char *compatible;
......@@ -78,6 +141,146 @@ static unsigned long *__cpu_capacity;
#define cpu_capacity(cpu) __cpu_capacity[cpu]
static unsigned long middle_capacity = 1;
static bool cap_from_dt = true;
static u32 *raw_capacity;
static bool cap_parsing_failed;
static u32 capacity_scale;
static int __init parse_cpu_capacity(struct device_node *cpu_node, int cpu)
{
int ret = 1;
u32 cpu_capacity;
if (cap_parsing_failed)
return !ret;
ret = of_property_read_u32(cpu_node,
"capacity-dmips-mhz",
&cpu_capacity);
if (!ret) {
if (!raw_capacity) {
raw_capacity = kcalloc(num_possible_cpus(),
sizeof(*raw_capacity),
GFP_KERNEL);
if (!raw_capacity) {
pr_err("cpu_capacity: failed to allocate memory for raw capacities\n");
cap_parsing_failed = true;
return !ret;
}
}
capacity_scale = max(cpu_capacity, capacity_scale);
raw_capacity[cpu] = cpu_capacity;
pr_debug("cpu_capacity: %s cpu_capacity=%u (raw)\n",
cpu_node->full_name, raw_capacity[cpu]);
} else {
if (raw_capacity) {
pr_err("cpu_capacity: missing %s raw capacity\n",
cpu_node->full_name);
pr_err("cpu_capacity: partial information: fallback to 1024 for all CPUs\n");
}
cap_parsing_failed = true;
kfree(raw_capacity);
}
return !ret;
}
static void normalize_cpu_capacity(void)
{
u64 capacity;
int cpu;
if (!raw_capacity || cap_parsing_failed)
return;
pr_debug("cpu_capacity: capacity_scale=%u\n", capacity_scale);
mutex_lock(&cpu_scale_mutex);
for_each_possible_cpu(cpu) {
capacity = (raw_capacity[cpu] << SCHED_CAPACITY_SHIFT)
/ capacity_scale;
set_capacity_scale(cpu, capacity);
pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n",
cpu, arch_scale_cpu_capacity(NULL, cpu));
}
mutex_unlock(&cpu_scale_mutex);
}
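A worked example of the normalization loop above, with SCHED_CAPACITY_SHIFT assumed to be 10 (scale 1024) and made-up capacity-dmips-mhz values; this is a host-side sketch, not kernel code:

#include <stdint.h>
#include <stdio.h>

#define SCHED_CAPACITY_SHIFT 10	/* assumed; gives a scale of 1024 */

int main(void)
{
	uint32_t raw[2] = { 1024, 446 };	/* e.g. a big and a LITTLE core */
	uint32_t capacity_scale = 1024;		/* max raw capacity seen */

	for (int i = 0; i < 2; i++) {
		uint64_t cap = ((uint64_t)raw[i] << SCHED_CAPACITY_SHIFT)
				/ capacity_scale;
		printf("cpu%d capacity=%llu\n", i,
		       (unsigned long long)cap);	/* 1024 and 446 */
	}
	return 0;
}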
#ifdef CONFIG_CPU_FREQ
static cpumask_var_t cpus_to_visit;
static bool cap_parsing_done;
static void parsing_done_workfn(struct work_struct *work);
static DECLARE_WORK(parsing_done_work, parsing_done_workfn);
static int
init_cpu_capacity_callback(struct notifier_block *nb,
unsigned long val,
void *data)
{
struct cpufreq_policy *policy = data;
int cpu;
if (cap_parsing_failed || cap_parsing_done)
return 0;
switch (val) {
case CPUFREQ_NOTIFY:
pr_debug("cpu_capacity: init cpu capacity for CPUs [%*pbl] (to_visit=%*pbl)\n",
cpumask_pr_args(policy->related_cpus),
cpumask_pr_args(cpus_to_visit));
cpumask_andnot(cpus_to_visit,
cpus_to_visit,
policy->related_cpus);
for_each_cpu(cpu, policy->related_cpus) {
raw_capacity[cpu] = arch_scale_cpu_capacity(NULL, cpu) *
policy->cpuinfo.max_freq / 1000UL;
capacity_scale = max(raw_capacity[cpu], capacity_scale);
}
if (cpumask_empty(cpus_to_visit)) {
normalize_cpu_capacity();
kfree(raw_capacity);
pr_debug("cpu_capacity: parsing done\n");
cap_parsing_done = true;
schedule_work(&parsing_done_work);
}
}
return 0;
}
static struct notifier_block init_cpu_capacity_notifier = {
.notifier_call = init_cpu_capacity_callback,
};
static int __init register_cpufreq_notifier(void)
{
if (cap_parsing_failed)
return -EINVAL;
if (!alloc_cpumask_var(&cpus_to_visit, GFP_KERNEL)) {
pr_err("cpu_capacity: failed to allocate memory for cpus_to_visit\n");
return -ENOMEM;
}
cpumask_copy(cpus_to_visit, cpu_possible_mask);
return cpufreq_register_notifier(&init_cpu_capacity_notifier,
CPUFREQ_POLICY_NOTIFIER);
}
core_initcall(register_cpufreq_notifier);
static void parsing_done_workfn(struct work_struct *work)
{
cpufreq_unregister_notifier(&init_cpu_capacity_notifier,
CPUFREQ_POLICY_NOTIFIER);
}
#else
static int __init free_raw_capacity(void)
{
kfree(raw_capacity);
return 0;
}
core_initcall(free_raw_capacity);
#endif
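A sketch of the frequency weighting done in init_cpu_capacity_callback() above: each CPU's relative capacity is multiplied by its cpuinfo.max_freq (in kHz) so that the later renormalization reflects both microarchitecture and clock speed. The frequencies below are made up:

#include <stdio.h>

int main(void)
{
	unsigned long cap_little = 446, freq_little = 1400000;	/* kHz */
	unsigned long cap_big = 1024, freq_big = 2000000;	/* kHz */

	unsigned long raw_little = cap_little * freq_little / 1000UL;
	unsigned long raw_big = cap_big * freq_big / 1000UL;

	/* these raw values then go through the same normalize step
	 * as the DT-only path */
	printf("little=%lu big=%lu\n", raw_little, raw_big);
	return 0;
}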
/*
* Iterate all CPUs' descriptors in DT and compute the efficiency
......@@ -99,6 +302,12 @@ static void __init parse_dt_topology(void)
__cpu_capacity = kcalloc(nr_cpu_ids, sizeof(*__cpu_capacity),
GFP_NOWAIT);
cn = of_find_node_by_path("/cpus");
if (!cn) {
pr_err("No CPU information found in DT\n");
return;
}
for_each_possible_cpu(cpu) {
const u32 *rate;
int len;
......@@ -110,6 +319,13 @@ static void __init parse_dt_topology(void)
continue;
}
if (parse_cpu_capacity(cn, cpu)) {
of_node_put(cn);
continue;
}
cap_from_dt = false;
for (cpu_eff = table_efficiency; cpu_eff->compatible; cpu_eff++)
if (of_device_is_compatible(cn, cpu_eff->compatible))
break;
......@@ -151,6 +367,8 @@ static void __init parse_dt_topology(void)
middle_capacity = ((max_capacity / 3)
>> (SCHED_CAPACITY_SHIFT-1)) + 1;
if (cap_from_dt && !cap_parsing_failed)
normalize_cpu_capacity();
}
/*
......@@ -160,7 +378,7 @@ static void __init parse_dt_topology(void)
*/
static void update_cpu_capacity(unsigned int cpu)
{
if (!cpu_capacity(cpu))
if (!cpu_capacity(cpu) || cap_from_dt)
return;
set_capacity_scale(cpu, cpu_capacity(cpu) / middle_capacity);
......
......@@ -74,6 +74,26 @@ void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long
dump_mem("", "Exception stack", frame + 4, frame + 4 + sizeof(struct pt_regs));
}
void dump_backtrace_stm(u32 *stack, u32 instruction)
{
char str[80], *p;
unsigned int x;
int reg;
for (reg = 10, x = 0, p = str; reg >= 0; reg--) {
if (instruction & BIT(reg)) {
p += sprintf(p, " r%d:%08x", reg, *stack--);
if (++x == 6) {
x = 0;
p = str;
printk("%s\n", str);
}
}
}
if (p != str)
printk("%s\n", str);
}
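A host-side illustration (not kernel code) of the register-mask walk in dump_backtrace_stm(): the low bits of the stm instruction word select which registers were pushed, bits 0-10 covering r0-r10. The six-per-line output buffering is omitted here for brevity:

#include <stdio.h>

#define BIT(n) (1u << (n))

int main(void)
{
	unsigned int instruction = BIT(4) | BIT(5) | BIT(10);	/* r4, r5, r10 */

	for (int reg = 10; reg >= 0; reg--)
		if (instruction & BIT(reg))
			printf(" r%d", reg);	/* prints r10 r5 r4 */
	printf("\n");
	return 0;
}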
#ifndef CONFIG_ARM_UNWIND
/*
* Stack pointers should always be within the kernel's view of
......
......@@ -3,6 +3,9 @@
* Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
*/
/* No __ro_after_init data in the .rodata section - which will always be ro */
#define RO_AFTER_INIT_DATA
#include <asm-generic/vmlinux.lds.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
......@@ -223,6 +226,8 @@ SECTIONS
. = ALIGN(PAGE_SIZE);
__init_end = .;
*(.data..ro_after_init)
NOSAVE_DATA
CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)
READ_MOSTLY_DATA(L1_CACHE_BYTES)
......
......@@ -28,7 +28,6 @@ Boston, MA 02110-1301, USA. */
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/export.h>
#ifdef __ARMEB__
#define al r1
......@@ -53,5 +52,3 @@ ENTRY(__aeabi_llsl)
ENDPROC(__ashldi3)
ENDPROC(__aeabi_llsl)
EXPORT_SYMBOL(__ashldi3)
EXPORT_SYMBOL(__aeabi_llsl)
......@@ -28,7 +28,6 @@ Boston, MA 02110-1301, USA. */
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/export.h>
#ifdef __ARMEB__
#define al r1
......@@ -53,5 +52,3 @@ ENTRY(__aeabi_lasr)
ENDPROC(__ashrdi3)
ENDPROC(__aeabi_lasr)
EXPORT_SYMBOL(__ashrdi3)
EXPORT_SYMBOL(__aeabi_lasr)
......@@ -10,6 +10,7 @@
* 27/03/03 Ian Molton Clean up CONFIG_CPU
*
*/
#include <linux/kern_levels.h>
#include <linux/linkage.h>
#include <asm/assembler.h>
.text
......@@ -83,13 +84,13 @@ for_each_frame: tst frame, mask @ Check for address exceptions
teq r3, r1, lsr #11
ldreq r0, [frame, #-8] @ get sp
subeq r0, r0, #4 @ point at the last arg
bleq .Ldumpstm @ dump saved registers
bleq dump_backtrace_stm @ dump saved registers
1004: ldr r1, [sv_pc, #0] @ if stmfd sp!, {..., fp, ip, lr, pc}
ldr r3, .Ldsi @ instruction exists,
teq r3, r1, lsr #11
subeq r0, frame, #16
bleq .Ldumpstm @ dump saved registers
bleq dump_backtrace_stm @ dump saved registers
teq sv_fp, #0 @ zero saved fp means
beq no_frame @ no further frames
......@@ -112,38 +113,6 @@ ENDPROC(c_backtrace)
.long 1004b, 1006b
.popsection
#define instr r4
#define reg r5
#define stack r6
.Ldumpstm: stmfd sp!, {instr, reg, stack, r7, lr}
mov stack, r0
mov instr, r1
mov reg, #10
mov r7, #0
1: mov r3, #1
ARM( tst instr, r3, lsl reg )
THUMB( lsl r3, reg )
THUMB( tst instr, r3 )
beq 2f
add r7, r7, #1
teq r7, #6
moveq r7, #0
adr r3, .Lcr
addne r3, r3, #1 @ skip newline
ldr r2, [stack], #-4
mov r1, reg
adr r0, .Lfp
bl printk
2: subs reg, reg, #1
bpl 1b
teq r7, #0
adrne r0, .Lcr
blne printk
ldmfd sp!, {instr, reg, stack, r7, pc}
.Lfp: .asciz " r%d:%08x%s"
.Lcr: .asciz "\n"
.Lbad: .asciz "Backtrace aborted due to bad frame pointer <%p>\n"
.align
.Ldsi: .word 0xe92dd800 >> 11 @ stmfd sp!, {... fp, ip, lr, pc}
......
#include <asm/assembler.h>
#include <asm/unwind.h>
#include <asm/export.h>
#if __LINUX_ARM_ARCH__ >= 6
.macro bitop, name, instr
......@@ -26,7 +25,6 @@ UNWIND( .fnstart )
bx lr
UNWIND( .fnend )
ENDPROC(\name )
EXPORT_SYMBOL(\name )
.endm
.macro testop, name, instr, store
......@@ -57,7 +55,6 @@ UNWIND( .fnstart )
2: bx lr
UNWIND( .fnend )
ENDPROC(\name )
EXPORT_SYMBOL(\name )
.endm
#else
.macro bitop, name, instr
......@@ -77,7 +74,6 @@ UNWIND( .fnstart )
ret lr
UNWIND( .fnend )
ENDPROC(\name )
EXPORT_SYMBOL(\name )
.endm
/**
......@@ -106,6 +102,5 @@ UNWIND( .fnstart )
ret lr
UNWIND( .fnend )
ENDPROC(\name )
EXPORT_SYMBOL(\name )
.endm
#endif
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/export.h>
#if __LINUX_ARM_ARCH__ >= 6
ENTRY(__bswapsi2)
......@@ -36,5 +35,3 @@ ENTRY(__bswapdi2)
ret lr
ENDPROC(__bswapdi2)
#endif
EXPORT_SYMBOL(__bswapsi2)
EXPORT_SYMBOL(__bswapdi2)
......@@ -10,7 +10,6 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/unwind.h>
#include <asm/export.h>
.text
......@@ -51,9 +50,6 @@ USER( strnebt r2, [r0])
UNWIND(.fnend)
ENDPROC(arm_clear_user)
ENDPROC(__clear_user_std)
#ifndef CONFIG_UACCESS_WITH_MEMCPY
EXPORT_SYMBOL(arm_clear_user)
#endif
.pushsection .text.fixup,"ax"
.align 0
......
......@@ -13,7 +13,6 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/unwind.h>
#include <asm/export.h>
/*
* Prototype:
......@@ -95,7 +94,6 @@ ENTRY(arm_copy_from_user)
#include "copy_template.S"
ENDPROC(arm_copy_from_user)
EXPORT_SYMBOL(arm_copy_from_user)
.pushsection .fixup,"ax"
.align 0
......
......@@ -13,7 +13,6 @@
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/cache.h>
#include <asm/export.h>
#define COPY_COUNT (PAGE_SZ / (2 * L1_CACHE_BYTES) PLD( -1 ))
......@@ -46,4 +45,3 @@ ENTRY(copy_page)
PLD( beq 2b )
ldmfd sp!, {r4, pc} @ 3
ENDPROC(copy_page)
EXPORT_SYMBOL(copy_page)
......@@ -13,7 +13,6 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/unwind.h>
#include <asm/export.h>
/*
* Prototype:
......@@ -100,9 +99,6 @@ WEAK(arm_copy_to_user)
ENDPROC(arm_copy_to_user)
ENDPROC(__copy_to_user_std)
#ifndef CONFIG_UACCESS_WITH_MEMCPY
EXPORT_SYMBOL(arm_copy_to_user)
#endif
.pushsection .text.fixup,"ax"
.align 0
......
......@@ -9,7 +9,6 @@
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/export.h>
.text
......@@ -31,4 +30,4 @@ ENTRY(__csum_ipv6_magic)
adcs r0, r0, #0
ldmfd sp!, {pc}
ENDPROC(__csum_ipv6_magic)
EXPORT_SYMBOL(__csum_ipv6_magic)
......@@ -9,7 +9,6 @@
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/export.h>
.text
......@@ -141,4 +140,3 @@ ENTRY(csum_partial)
bne 4b
b .Lless4
ENDPROC(csum_partial)
EXPORT_SYMBOL(csum_partial)
......@@ -49,6 +49,5 @@
#define FN_ENTRY ENTRY(csum_partial_copy_nocheck)
#define FN_EXIT ENDPROC(csum_partial_copy_nocheck)
#define FN_EXPORT EXPORT_SYMBOL(csum_partial_copy_nocheck)
#include "csumpartialcopygeneric.S"
......@@ -8,7 +8,6 @@
* published by the Free Software Foundation.
*/
#include <asm/assembler.h>
#include <asm/export.h>
/*
* unsigned int
......@@ -332,4 +331,3 @@ FN_ENTRY
mov r5, r4, get_byte_1
b .Lexit
FN_EXIT
FN_EXPORT
......@@ -73,7 +73,6 @@
#define FN_ENTRY ENTRY(csum_partial_copy_from_user)
#define FN_EXIT ENDPROC(csum_partial_copy_from_user)
#define FN_EXPORT EXPORT_SYMBOL(csum_partial_copy_from_user)
#include "csumpartialcopygeneric.S"
......
......@@ -17,24 +17,23 @@
.LC1: .word UDELAY_MULT
/*
* loops = r0 * HZ * loops_per_jiffy / 1000000
*
* r0 <= 2000
* HZ <= 1000
*/
ENTRY(__loop_udelay)
ldr r2, .LC1
mul r0, r2, r0
ENTRY(__loop_const_udelay) @ 0 <= r0 <= 0x7fffff06
mul r0, r2, r0 @ r0 = delay_us * UDELAY_MULT
ENTRY(__loop_const_udelay) @ 0 <= r0 <= 0xfffffaf0
ldr r2, .LC0
ldr r2, [r2]
umull r1, r0, r2, r0
adds r1, r1, #0xffffffff
adcs r0, r0, r0
umull r1, r0, r2, r0 @ r0-r1 = r0 * loops_per_jiffy
adds r1, r1, #0xffffffff @ rounding up ...
adcs r0, r0, r0 @ and right shift by 31
reteq lr
/*
* loops = r0 * HZ * loops_per_jiffy / 1000000
*/
.align 3
@ Delay routine
......
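A C model of what the umull/adds/adcs sequence above computes, under the assumption that the carry trick reads as follows: adds #0xffffffff sets carry iff the low word is non-zero, and adcs then doubles the high word while folding that carry in, giving an upward-rounded shift by UDELAY_SHIFT (31) so the delay is never shorter than requested:

#include <stdint.h>

static uint32_t const_udelay_loops(uint32_t scaled_us,
				   uint32_t loops_per_jiffy)
{
	uint64_t product = (uint64_t)scaled_us * loops_per_jiffy;
	uint32_t hi = product >> 32, lo = (uint32_t)product;

	/* round up unless the product is an exact multiple of 2^31 */
	return (hi << 1) + (lo != 0);
}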
......@@ -24,7 +24,6 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/export.h>
#include <linux/timex.h>
/*
......@@ -35,7 +34,6 @@ struct arm_delay_ops arm_delay_ops __ro_after_init = {
.const_udelay = __loop_const_udelay,
.udelay = __loop_udelay,
};
EXPORT_SYMBOL(arm_delay_ops);
static const struct delay_timer *delay_timer;
static bool delay_calibrated;
......
......@@ -15,7 +15,6 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/unwind.h>
#include <asm/export.h>
#ifdef __ARMEB__
#define xh r0
......@@ -211,4 +210,3 @@ Ldiv0_64:
UNWIND(.fnend)
ENDPROC(__do_div64)
EXPORT_SYMBOL(__do_div64)
......@@ -15,7 +15,6 @@
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/export.h>
.text
/*
......@@ -38,7 +37,6 @@ ENTRY(_find_first_zero_bit_le)
3: mov r0, r1 @ no free bits
ret lr
ENDPROC(_find_first_zero_bit_le)
EXPORT_SYMBOL(_find_first_zero_bit_le)
/*
* Purpose : Find next 'zero' bit
......@@ -59,7 +57,6 @@ ENTRY(_find_next_zero_bit_le)
add r2, r2, #1 @ align bit pointer
b 2b @ loop for next bit
ENDPROC(_find_next_zero_bit_le)
EXPORT_SYMBOL(_find_next_zero_bit_le)
/*
* Purpose : Find a 'one' bit
......@@ -81,7 +78,6 @@ ENTRY(_find_first_bit_le)
3: mov r0, r1 @ no free bits
ret lr
ENDPROC(_find_first_bit_le)
EXPORT_SYMBOL(_find_first_bit_le)
/*
* Purpose : Find next 'one' bit
......@@ -101,7 +97,6 @@ ENTRY(_find_next_bit_le)
add r2, r2, #1 @ align bit pointer
b 2b @ loop for next bit
ENDPROC(_find_next_bit_le)
EXPORT_SYMBOL(_find_next_bit_le)
#ifdef __ARMEB__
......@@ -121,7 +116,6 @@ ENTRY(_find_first_zero_bit_be)
3: mov r0, r1 @ no free bits
ret lr
ENDPROC(_find_first_zero_bit_be)
EXPORT_SYMBOL(_find_first_zero_bit_be)
ENTRY(_find_next_zero_bit_be)
teq r1, #0
......@@ -139,7 +133,6 @@ ENTRY(_find_next_zero_bit_be)
add r2, r2, #1 @ align bit pointer
b 2b @ loop for next bit
ENDPROC(_find_next_zero_bit_be)
EXPORT_SYMBOL(_find_next_zero_bit_be)
ENTRY(_find_first_bit_be)
teq r1, #0
......@@ -157,7 +150,6 @@ ENTRY(_find_first_bit_be)
3: mov r0, r1 @ no free bits
ret lr
ENDPROC(_find_first_bit_be)
EXPORT_SYMBOL(_find_first_bit_be)
ENTRY(_find_next_bit_be)
teq r1, #0
......@@ -174,7 +166,6 @@ ENTRY(_find_next_bit_be)
add r2, r2, #1 @ align bit pointer
b 2b @ loop for next bit
ENDPROC(_find_next_bit_be)
EXPORT_SYMBOL(_find_next_bit_be)
#endif
......
......@@ -31,7 +31,6 @@
#include <asm/assembler.h>
#include <asm/errno.h>
#include <asm/domain.h>
#include <asm/export.h>
ENTRY(__get_user_1)
check_uaccess r0, 1, r1, r2, __get_user_bad
......@@ -39,7 +38,6 @@ ENTRY(__get_user_1)
mov r0, #0
ret lr
ENDPROC(__get_user_1)
EXPORT_SYMBOL(__get_user_1)
ENTRY(__get_user_2)
check_uaccess r0, 2, r1, r2, __get_user_bad
......@@ -60,7 +58,6 @@ rb .req r0
mov r0, #0
ret lr
ENDPROC(__get_user_2)
EXPORT_SYMBOL(__get_user_2)
ENTRY(__get_user_4)
check_uaccess r0, 4, r1, r2, __get_user_bad
......@@ -68,7 +65,6 @@ ENTRY(__get_user_4)
mov r0, #0
ret lr
ENDPROC(__get_user_4)
EXPORT_SYMBOL(__get_user_4)
ENTRY(__get_user_8)
check_uaccess r0, 8, r1, r2, __get_user_bad
......@@ -82,7 +78,6 @@ ENTRY(__get_user_8)
mov r0, #0
ret lr
ENDPROC(__get_user_8)
EXPORT_SYMBOL(__get_user_8)
#ifdef __ARMEB__
ENTRY(__get_user_32t_8)
......@@ -96,7 +91,6 @@ ENTRY(__get_user_32t_8)
mov r0, #0
ret lr
ENDPROC(__get_user_32t_8)
EXPORT_SYMBOL(__get_user_32t_8)
ENTRY(__get_user_64t_1)
check_uaccess r0, 1, r1, r2, __get_user_bad8
......@@ -104,7 +98,6 @@ ENTRY(__get_user_64t_1)
mov r0, #0
ret lr
ENDPROC(__get_user_64t_1)
EXPORT_SYMBOL(__get_user_64t_1)
ENTRY(__get_user_64t_2)
check_uaccess r0, 2, r1, r2, __get_user_bad8
......@@ -121,7 +114,6 @@ rb .req r0
mov r0, #0
ret lr
ENDPROC(__get_user_64t_2)
EXPORT_SYMBOL(__get_user_64t_2)
ENTRY(__get_user_64t_4)
check_uaccess r0, 4, r1, r2, __get_user_bad8
......@@ -129,7 +121,6 @@ ENTRY(__get_user_64t_4)
mov r0, #0
ret lr
ENDPROC(__get_user_64t_4)
EXPORT_SYMBOL(__get_user_64t_4)
#endif
__get_user_bad8:
......
......@@ -9,7 +9,6 @@
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/export.h>
.Linsb_align: rsb ip, ip, #4
cmp ip, r2
......@@ -122,4 +121,3 @@ ENTRY(__raw_readsb)
ldmfd sp!, {r4 - r6, pc}
ENDPROC(__raw_readsb)
EXPORT_SYMBOL(__raw_readsb)
......@@ -9,7 +9,6 @@
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/export.h>
ENTRY(__raw_readsl)
teq r2, #0 @ do we have to check for the zero len?
......@@ -78,4 +77,3 @@ ENTRY(__raw_readsl)
strb r3, [r1, #0]
ret lr
ENDPROC(__raw_readsl)
EXPORT_SYMBOL(__raw_readsl)
......@@ -9,7 +9,6 @@
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/export.h>
.Linsw_bad_alignment:
adr r0, .Linsw_bad_align_msg
......@@ -104,4 +103,4 @@ ENTRY(__raw_readsw)
ldmfd sp!, {r4, r5, r6, pc}
EXPORT_SYMBOL(__raw_readsw)
......@@ -9,7 +9,6 @@
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/export.h>
.macro pack, rd, hw1, hw2
#ifndef __ARMEB__
......@@ -130,4 +129,3 @@ ENTRY(__raw_readsw)
strneb ip, [r1]
ldmfd sp!, {r4, pc}
ENDPROC(__raw_readsw)
EXPORT_SYMBOL(__raw_readsw)
......@@ -9,7 +9,6 @@
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/export.h>
.macro outword, rd
#ifndef __ARMEB__
......@@ -93,4 +92,3 @@ ENTRY(__raw_writesb)
ldmfd sp!, {r4, r5, pc}
ENDPROC(__raw_writesb)
EXPORT_SYMBOL(__raw_writesb)
......@@ -9,7 +9,6 @@
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/export.h>
ENTRY(__raw_writesl)
teq r2, #0 @ do we have to check for the zero len?
......@@ -66,4 +65,3 @@ ENTRY(__raw_writesl)
bne 6b
ret lr
ENDPROC(__raw_writesl)
EXPORT_SYMBOL(__raw_writesl)
......@@ -9,7 +9,6 @@
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/export.h>
.Loutsw_bad_alignment:
adr r0, .Loutsw_bad_align_msg
......@@ -125,4 +124,3 @@ ENTRY(__raw_writesw)
strne ip, [r0]
ldmfd sp!, {r4, r5, r6, pc}
EXPORT_SYMBOL(__raw_writesw)
......@@ -9,7 +9,6 @@
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/export.h>
.macro outword, rd
#ifndef __ARMEB__
......@@ -99,4 +98,3 @@ ENTRY(__raw_writesw)
strneh ip, [r0]
ret lr
ENDPROC(__raw_writesw)
EXPORT_SYMBOL(__raw_writesw)
......@@ -36,7 +36,6 @@ Boston, MA 02111-1307, USA. */
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/unwind.h>
#include <asm/export.h>
.macro ARM_DIV_BODY dividend, divisor, result, curbit
......@@ -239,8 +238,6 @@ UNWIND(.fnstart)
UNWIND(.fnend)
ENDPROC(__udivsi3)
ENDPROC(__aeabi_uidiv)
EXPORT_SYMBOL(__udivsi3)
EXPORT_SYMBOL(__aeabi_uidiv)
ENTRY(__umodsi3)
UNWIND(.fnstart)
......@@ -259,7 +256,6 @@ UNWIND(.fnstart)
UNWIND(.fnend)
ENDPROC(__umodsi3)
EXPORT_SYMBOL(__umodsi3)
#ifdef CONFIG_ARM_PATCH_IDIV
.align 3
......@@ -307,8 +303,6 @@ UNWIND(.fnstart)
UNWIND(.fnend)
ENDPROC(__divsi3)
ENDPROC(__aeabi_idiv)
EXPORT_SYMBOL(__divsi3)
EXPORT_SYMBOL(__aeabi_idiv)
ENTRY(__modsi3)
UNWIND(.fnstart)
......@@ -333,7 +327,6 @@ UNWIND(.fnstart)
UNWIND(.fnend)
ENDPROC(__modsi3)
EXPORT_SYMBOL(__modsi3)
#ifdef CONFIG_AEABI
......@@ -350,7 +343,6 @@ UNWIND(.save {r0, r1, ip, lr} )
UNWIND(.fnend)
ENDPROC(__aeabi_uidivmod)
EXPORT_SYMBOL(__aeabi_uidivmod)
ENTRY(__aeabi_idivmod)
UNWIND(.fnstart)
......@@ -364,7 +356,6 @@ UNWIND(.save {r0, r1, ip, lr} )
UNWIND(.fnend)
ENDPROC(__aeabi_idivmod)
EXPORT_SYMBOL(__aeabi_idivmod)
#endif
......
......@@ -28,7 +28,6 @@ Boston, MA 02110-1301, USA. */
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/export.h>
#ifdef __ARMEB__
#define al r1
......@@ -53,5 +52,3 @@ ENTRY(__aeabi_llsr)
ENDPROC(__lshrdi3)
ENDPROC(__aeabi_llsr)
EXPORT_SYMBOL(__lshrdi3)
EXPORT_SYMBOL(__aeabi_llsr)
......@@ -11,7 +11,6 @@
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/export.h>
.text
.align 5
......@@ -25,4 +24,3 @@ ENTRY(memchr)
2: movne r0, #0
ret lr
ENDPROC(memchr)
EXPORT_SYMBOL(memchr)
......@@ -13,7 +13,6 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/unwind.h>
#include <asm/export.h>
#define LDR1W_SHIFT 0
#define STR1W_SHIFT 0
......@@ -69,5 +68,3 @@ ENTRY(memcpy)
ENDPROC(memcpy)
ENDPROC(mmiocpy)
EXPORT_SYMBOL(memcpy)
EXPORT_SYMBOL(mmiocpy)
......@@ -13,7 +13,6 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/unwind.h>
#include <asm/export.h>
.text
......@@ -226,4 +225,3 @@ ENTRY(memmove)
18: backward_copy_shift push=24 pull=8
ENDPROC(memmove)
EXPORT_SYMBOL(memmove)
......@@ -12,7 +12,6 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/unwind.h>
#include <asm/export.h>
.text
.align 5
......@@ -136,5 +135,3 @@ UNWIND( .fnstart )
UNWIND( .fnend )
ENDPROC(memset)
ENDPROC(mmioset)
EXPORT_SYMBOL(memset)
EXPORT_SYMBOL(mmioset)
......@@ -10,7 +10,6 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/unwind.h>
#include <asm/export.h>
.text
.align 5
......@@ -136,4 +135,3 @@ UNWIND( .fnstart )
ret lr @ 1
UNWIND( .fnend )
ENDPROC(__memzero)
EXPORT_SYMBOL(__memzero)
......@@ -12,7 +12,6 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/export.h>
#ifdef __ARMEB__
#define xh r0
......@@ -47,5 +46,3 @@ ENTRY(__aeabi_lmul)
ENDPROC(__muldi3)
ENDPROC(__aeabi_lmul)
EXPORT_SYMBOL(__muldi3)
EXPORT_SYMBOL(__aeabi_lmul)
......@@ -31,7 +31,6 @@
#include <asm/assembler.h>
#include <asm/errno.h>
#include <asm/domain.h>
#include <asm/export.h>
ENTRY(__put_user_1)
check_uaccess r0, 1, r1, ip, __put_user_bad
......@@ -39,7 +38,6 @@ ENTRY(__put_user_1)
mov r0, #0
ret lr
ENDPROC(__put_user_1)
EXPORT_SYMBOL(__put_user_1)
ENTRY(__put_user_2)
check_uaccess r0, 2, r1, ip, __put_user_bad
......@@ -64,7 +62,6 @@ ENTRY(__put_user_2)
mov r0, #0
ret lr
ENDPROC(__put_user_2)
EXPORT_SYMBOL(__put_user_2)
ENTRY(__put_user_4)
check_uaccess r0, 4, r1, ip, __put_user_bad
......@@ -72,7 +69,6 @@ ENTRY(__put_user_4)
mov r0, #0
ret lr
ENDPROC(__put_user_4)
EXPORT_SYMBOL(__put_user_4)
ENTRY(__put_user_8)
check_uaccess r0, 8, r1, ip, __put_user_bad
......@@ -86,7 +82,6 @@ ENTRY(__put_user_8)
mov r0, #0
ret lr
ENDPROC(__put_user_8)
EXPORT_SYMBOL(__put_user_8)
__put_user_bad:
mov r0, #-EFAULT
......
......@@ -11,7 +11,6 @@
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/export.h>
.text
.align 5
......@@ -26,4 +25,3 @@ ENTRY(strchr)
subeq r0, r0, #1
ret lr
ENDPROC(strchr)
EXPORT_SYMBOL(strchr)
......@@ -11,7 +11,6 @@
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/export.h>
.text
.align 5
......@@ -25,4 +24,3 @@ ENTRY(strrchr)
mov r0, r3
ret lr
ENDPROC(strrchr)
EXPORT_SYMBOL(strrchr)
......@@ -19,7 +19,6 @@
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/export.h>
#include <asm/current.h>
#include <asm/page.h>
......@@ -157,7 +156,6 @@ arm_copy_to_user(void __user *to, const void *from, unsigned long n)
}
return n;
}
EXPORT_SYMBOL(arm_copy_to_user);
static unsigned long noinline
__clear_user_memset(void __user *addr, unsigned long n)
......@@ -215,7 +213,6 @@ unsigned long arm_clear_user(void __user *addr, unsigned long n)
}
return n;
}
EXPORT_SYMBOL(arm_clear_user);
#if 0
......
......@@ -12,7 +12,6 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/export.h>
#ifdef __ARMEB__
#define xh r0
......@@ -36,7 +35,6 @@ ENTRY(__ucmpdi2)
ret lr
ENDPROC(__ucmpdi2)
EXPORT_SYMBOL(__ucmpdi2)
#ifdef CONFIG_AEABI
......@@ -50,7 +48,6 @@ ENTRY(__aeabi_ulcmp)
ret lr
ENDPROC(__aeabi_ulcmp)
EXPORT_SYMBOL(__aeabi_ulcmp)
#endif
......@@ -32,6 +32,7 @@ endif
ifdef CONFIG_SND_IMX_SOC
obj-y += ssi-fiq.o
obj-y += ssi-fiq-ksym.o
endif
# i.MX21 based machines
......
/*
* Exported ksyms for the SSI FIQ handler
*
* Copyright (C) 2009, Sascha Hauer <s.hauer@pengutronix.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/platform_data/asoc-imx-ssi.h>
EXPORT_SYMBOL(imx_ssi_fiq_tx_buffer);
EXPORT_SYMBOL(imx_ssi_fiq_rx_buffer);
EXPORT_SYMBOL(imx_ssi_fiq_start);
EXPORT_SYMBOL(imx_ssi_fiq_end);
EXPORT_SYMBOL(imx_ssi_fiq_base);
......@@ -8,7 +8,6 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/export.h>
/*
* r8 = bit 0-15: tx offset, bit 16-31: tx buffer size
......@@ -145,8 +144,4 @@ imx_ssi_fiq_tx_buffer:
.word 0x0
.L_imx_ssi_fiq_end:
imx_ssi_fiq_end:
EXPORT_SYMBOL(imx_ssi_fiq_tx_buffer)
EXPORT_SYMBOL(imx_ssi_fiq_rx_buffer)
EXPORT_SYMBOL(imx_ssi_fiq_start)
EXPORT_SYMBOL(imx_ssi_fiq_end)
EXPORT_SYMBOL(imx_ssi_fiq_base)
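Hypothetical helpers (not part of the driver) illustrating the r8 packing described in the comment above, where the low 16 bits carry the tx offset and the high 16 bits the tx buffer size:

#include <stdint.h>

static inline uint32_t ssi_fiq_pack(uint16_t tx_offset, uint16_t tx_size)
{
	return (uint32_t)tx_size << 16 | tx_offset;
}

static inline uint16_t ssi_fiq_tx_offset(uint32_t r8) { return r8 & 0xffff; }
static inline uint16_t ssi_fiq_tx_size(uint32_t r8)   { return r8 >> 16; }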
......@@ -56,8 +56,4 @@ static inline unsigned long get_clock_tick_rate(void)
#include "SA-1100.h"
#ifdef CONFIG_SA1101
#include "SA-1101.h"
#endif
#endif /* _ASM_ARCH_HARDWARE_H */
......@@ -991,7 +991,7 @@ config CACHE_TAUROS2
config CACHE_UNIPHIER
bool "Enable the UniPhier outer cache controller"
depends on ARCH_UNIPHIER
default y
select ARM_L1_CACHE_SHIFT_7
select OUTER_CACHE
select OUTER_CACHE_SYNC
help
......@@ -1012,8 +1012,14 @@ config ARM_L1_CACHE_SHIFT_6
help
Setting ARM L1 cache line size to 64 Bytes.
config ARM_L1_CACHE_SHIFT_7
bool
help
Setting ARM L1 cache line size to 128 Bytes.
config ARM_L1_CACHE_SHIFT
int
default 7 if ARM_L1_CACHE_SHIFT_7
default 6 if ARM_L1_CACHE_SHIFT_6
default 5
......
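The new shift value feeds the usual line-size derivation; a minimal illustration, assuming the standard asm/cache.h relationship:

/* ARM_L1_CACHE_SHIFT_7 makes CONFIG_ARM_L1_CACHE_SHIFT default to 7 */
#define L1_CACHE_SHIFT	7
#define L1_CACHE_BYTES	(1 << L1_CACHE_SHIFT)	/* 128 bytes for UniPhier */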
......@@ -7,7 +7,7 @@
* : r4 = aborted context pc
* : r5 = aborted context psr
*
* Returns : r4-r5, r10-r11, r13 preserved
* Returns : r4-r5, r9-r11, r13 preserved
*
* Purpose : obtain information about current aborted instruction.
* Note: we read user space. This means we might cause a data
......@@ -48,7 +48,10 @@ ENTRY(v4t_late_abort)
/* c */ b do_DataAbort @ ldc rd, [rn], #m @ Same as ldr rd, [rn], #m
/* d */ b do_DataAbort @ ldc rd, [rn, #m]
/* e */ b .data_unknown
/* f */
/* f */ b .data_unknown
.data_unknown_r9:
ldr r9, [sp], #4
.data_unknown: @ Part of jumptable
mov r0, r4
mov r1, r8
......@@ -57,6 +60,7 @@ ENTRY(v4t_late_abort)
.data_arm_ldmstm:
tst r8, #1 << 21 @ check writeback bit
beq do_DataAbort @ no writeback -> no fixup
str r9, [sp, #-4]!
mov r7, #0x11
orr r7, r7, #0x1100
and r6, r8, r7
......@@ -75,12 +79,14 @@ ENTRY(v4t_late_abort)
subne r7, r7, r6, lsl #2 @ Undo increment
addeq r7, r7, r6, lsl #2 @ Undo decrement
str r7, [r2, r9, lsr #14] @ Put register 'Rn'
ldr r9, [sp], #4
b do_DataAbort
.data_arm_lateldrhpre:
tst r8, #1 << 21 @ Check writeback bit
beq do_DataAbort @ No writeback -> no fixup
.data_arm_lateldrhpost:
str r9, [sp, #-4]!
and r9, r8, #0x00f @ get Rm / low nibble of immediate value
tst r8, #1 << 22 @ if (immediate offset)
andne r6, r8, #0xf00 @ { immediate high nibble
......@@ -93,6 +99,7 @@ ENTRY(v4t_late_abort)
subne r7, r7, r6 @ Undo increment
addeq r7, r7, r6 @ Undo decrement
str r7, [r2, r9, lsr #14] @ Put register 'Rn'
ldr r9, [sp], #4
b do_DataAbort
.data_arm_lateldrpreconst:
......@@ -101,12 +108,14 @@ ENTRY(v4t_late_abort)
.data_arm_lateldrpostconst:
movs r6, r8, lsl #20 @ Get offset
beq do_DataAbort @ zero -> no fixup
str r9, [sp, #-4]!
and r9, r8, #15 << 16 @ Extract 'n' from instruction
ldr r7, [r2, r9, lsr #14] @ Get register 'Rn'
tst r8, #1 << 23 @ Check U bit
subne r7, r7, r6, lsr #20 @ Undo increment
addeq r7, r7, r6, lsr #20 @ Undo decrement
str r7, [r2, r9, lsr #14] @ Put register 'Rn'
ldr r9, [sp], #4
b do_DataAbort
.data_arm_lateldrprereg:
......@@ -115,6 +124,7 @@ ENTRY(v4t_late_abort)
.data_arm_lateldrpostreg:
and r7, r8, #15 @ Extract 'm' from instruction
ldr r6, [r2, r7, lsl #2] @ Get register 'Rm'
str r9, [sp, #-4]!
mov r9, r8, lsr #7 @ get shift count
ands r9, r9, #31
and r7, r8, #0x70 @ get shift type
......@@ -126,33 +136,33 @@ ENTRY(v4t_late_abort)
b .data_arm_apply_r6_and_rn
b .data_arm_apply_r6_and_rn @ 1: LSL #0
nop
b .data_unknown @ 2: MUL?
b .data_unknown_r9 @ 2: MUL?
nop
b .data_unknown @ 3: MUL?
b .data_unknown_r9 @ 3: MUL?
nop
mov r6, r6, lsr r9 @ 4: LSR #!0
b .data_arm_apply_r6_and_rn
mov r6, r6, lsr #32 @ 5: LSR #32
b .data_arm_apply_r6_and_rn
b .data_unknown @ 6: MUL?
b .data_unknown_r9 @ 6: MUL?
nop
b .data_unknown @ 7: MUL?
b .data_unknown_r9 @ 7: MUL?
nop
mov r6, r6, asr r9 @ 8: ASR #!0
b .data_arm_apply_r6_and_rn
mov r6, r6, asr #32 @ 9: ASR #32
b .data_arm_apply_r6_and_rn
b .data_unknown @ A: MUL?
b .data_unknown_r9 @ A: MUL?
nop
b .data_unknown @ B: MUL?
b .data_unknown_r9 @ B: MUL?
nop
mov r6, r6, ror r9 @ C: ROR #!0
b .data_arm_apply_r6_and_rn
mov r6, r6, rrx @ D: RRX
b .data_arm_apply_r6_and_rn
b .data_unknown @ E: MUL?
b .data_unknown_r9 @ E: MUL?
nop
b .data_unknown @ F: MUL?
b .data_unknown_r9 @ F: MUL?
.data_thumb_abort:
ldrh r8, [r4] @ read instruction
......@@ -190,6 +200,7 @@ ENTRY(v4t_late_abort)
.data_thumb_pushpop:
tst r8, #1 << 10
beq .data_unknown
str r9, [sp, #-4]!
and r6, r8, #0x55 @ hweight8(r8) + R bit
and r9, r8, #0xaa
add r6, r6, r9, lsr #1
......@@ -204,9 +215,11 @@ ENTRY(v4t_late_abort)
addeq r7, r7, r6, lsl #2 @ increment SP if PUSH
subne r7, r7, r6, lsl #2 @ decrement SP if POP
str r7, [r2, #13 << 2]
ldr r9, [sp], #4
b do_DataAbort
.data_thumb_ldmstm:
str r9, [sp, #-4]!
and r6, r8, #0x55 @ hweight8(r8)
and r9, r8, #0xaa
add r6, r6, r9, lsr #1
......@@ -219,4 +232,5 @@ ENTRY(v4t_late_abort)
and r6, r6, #15 @ number of regs to transfer
sub r7, r7, r6, lsl #2 @ always decrement
str r7, [r2, r9, lsr #6]
ldr r9, [sp], #4
b do_DataAbort
......@@ -1167,7 +1167,7 @@ static int __init dma_debug_do_init(void)
dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
return 0;
}
fs_initcall(dma_debug_do_init);
core_initcall(dma_debug_do_init);
#ifdef CONFIG_ARM_DMA_USE_IOMMU
......
......@@ -34,28 +34,29 @@ static int change_page_range(pte_t *ptep, pgtable_t token, unsigned long addr,
return 0;
}
static bool in_range(unsigned long start, unsigned long size,
unsigned long range_start, unsigned long range_end)
{
return start >= range_start && start < range_end &&
size <= range_end - start;
}
static int change_memory_common(unsigned long addr, int numpages,
pgprot_t set_mask, pgprot_t clear_mask)
{
unsigned long start = addr;
unsigned long size = PAGE_SIZE*numpages;
unsigned long end = start + size;
unsigned long start = addr & PAGE_MASK;
unsigned long end = PAGE_ALIGN(addr) + numpages * PAGE_SIZE;
unsigned long size = end - start;
int ret;
struct page_change_data data;
if (!IS_ALIGNED(addr, PAGE_SIZE)) {
start &= PAGE_MASK;
end = start + size;
WARN_ON_ONCE(1);
}
WARN_ON_ONCE(start != addr);
if (!numpages)
if (!size)
return 0;
if (start < MODULES_VADDR || start >= MODULES_END)
return -EINVAL;
if (end < MODULES_VADDR || start >= MODULES_END)
if (!in_range(start, size, MODULES_VADDR, MODULES_END) &&
!in_range(start, size, VMALLOC_START, VMALLOC_END))
return -EINVAL;
data.set_mask = set_mask;
......
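A quick host-side check of why in_range() above compares size against range_end - start rather than testing start + size directly: with start near the top of the address space, start + size can wrap, while range_end - start cannot wrap once start < range_end is established. Written for a 32-bit unsigned long; the values are made up:

#include <stdbool.h>
#include <stdio.h>

static bool in_range(unsigned long start, unsigned long size,
		     unsigned long range_start, unsigned long range_end)
{
	return start >= range_start && start < range_end &&
	       size <= range_end - start;
}

int main(void)
{
	/* start + size would overflow, yet the check safely rejects it */
	printf("%d\n", in_range(0xfffff000UL, 0x2000UL,
				0xffff0000UL, 0xffffffffUL));	/* 0 */
	/* a region genuinely inside the range is accepted */
	printf("%d\n", in_range(0xffff1000UL, 0x1000UL,
				0xffff0000UL, 0xffffffffUL));	/* 1 */
	return 0;
}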
......@@ -96,7 +96,7 @@ ENTRY(cpu_cm7_proc_fin)
ret lr
ENDPROC(cpu_cm7_proc_fin)
.section ".text.init", #alloc, #execinstr
.section ".init.text", #alloc, #execinstr
__v7m_cm7_setup:
mov r8, #(V7M_SCB_CCR_DC | V7M_SCB_CCR_IC| V7M_SCB_CCR_BP)
......
......@@ -155,8 +155,8 @@ struct vfp_single {
u32 significand;
};
extern s32 vfp_get_float(unsigned int reg);
extern void vfp_put_float(s32 val, unsigned int reg);
asmlinkage s32 vfp_get_float(unsigned int reg);
asmlinkage void vfp_put_float(s32 val, unsigned int reg);
/*
* VFP_SINGLE_MANTISSA_BITS - number of bits in the mantissa
......@@ -270,8 +270,8 @@ struct vfp_double {
#else
#define VFP_REG_ZERO 16
#endif
extern u64 vfp_get_double(unsigned int reg);
extern void vfp_put_double(u64 val, unsigned int reg);
asmlinkage u64 vfp_get_double(unsigned int reg);
asmlinkage void vfp_put_double(u64 val, unsigned int reg);
#define VFP_DOUBLE_MANTISSA_BITS (52)
#define VFP_DOUBLE_EXPONENT_BITS (11)
......@@ -377,4 +377,4 @@ struct op {
u32 flags;
};
extern void vfp_save_state(void *location, u32 fpexc);
asmlinkage void vfp_save_state(void *location, u32 fpexc);
......@@ -34,11 +34,11 @@
/*
* Our undef handlers (in entry.S)
*/
void vfp_testing_entry(void);
void vfp_support_entry(void);
void vfp_null_entry(void);
asmlinkage void vfp_testing_entry(void);
asmlinkage void vfp_support_entry(void);
asmlinkage void vfp_null_entry(void);
void (*vfp_vector)(void) = vfp_null_entry;
asmlinkage void (*vfp_vector)(void) = vfp_null_entry;
/*
* Dual-use variable.
......