Commit 66d46672 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm

Pull ARM updates from Russell King:

 - an update for clkdev registration error detection to simplify users

 - add cpu capacity parsing from DT

 - support for larger cachelines found on UniPhier caches

 - documentation for udelay constants

 - properly tag assembly function declarations

 - remove unnecessary indirection of asm/mach-types.h

 - switch to syscall table based generation to simplify future additions
   of system calls, along with a corresponding commit for the pkey syscalls

 - remove redundant sa1101 header file

 - RONX-protect modules when they're in the vmalloc region

* 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm:
  ARM: mm: allow set_memory_*() to be used on the vmalloc region
  ARM: mm: fix set_memory_*() bounds checks
  ARM: 8631/1: clkdev: Detect errors in clk_hw_register_clkdev() for mass registration
  ARM: 8629/1: vfp: properly tag assembly function declarations in C code
  ARM: 8622/3: add sysfs cpu_capacity attribute
  ARM: 8621/3: parse cpu capacity-dmips-mhz from DT
  ARM: 8623/1: mm: add ARM_L1_CACHE_SHIFT_7 for UniPhier outer cache
  ARM: Update mach-types
  ARM: sa1100: remove SA-1101 header file
  ARM: 8619/1: udelay: document the various constants
  ARM: wire up new pkey syscalls
  ARM: convert to generated system call tables
  ARM: remove indirection of asm/mach-types.h
parents 991688bf ed141f28
...@@ -312,8 +312,11 @@ all: $(KBUILD_IMAGE) $(KBUILD_DTBS)
boot := arch/arm/boot
archheaders:
$(Q)$(MAKE) $(build)=arch/arm/tools uapi
archprepare:
$(Q)$(MAKE) $(build)=arch/arm/tools include/generated/mach-types.h
$(Q)$(MAKE) $(build)=arch/arm/tools kapi
# Convert bzImage to zImage
bzImage: zImage
...
...@@ -38,3 +38,6 @@ generic-y += termios.h
generic-y += timex.h
generic-y += trace_clock.h
generic-y += unaligned.h
generated-y += mach-types.h
generated-y += unistd-nr.h
...@@ -9,6 +9,33 @@
#include <asm/memory.h>
#include <asm/param.h> /* HZ */
/*
* Loop (or tick) based delay:
*
* loops = loops_per_jiffy * jiffies_per_sec * delay_us / us_per_sec
*
* where:
*
* jiffies_per_sec = HZ
* us_per_sec = 1000000
*
* Therefore the constant part is HZ / 1000000 which is a small
* fractional number. To make this usable with integer math, we
* scale up this constant by 2^31, perform the actual multiplication,
* and scale the result back down by 2^31 with a simple shift:
*
* loops = (loops_per_jiffy * delay_us * UDELAY_MULT) >> 31
*
* where:
*
* UDELAY_MULT = 2^31 * HZ / 1000000
* = (2^31 / 1000000) * HZ
* = 2147.483648 * HZ
* = 2147 * HZ + 483648 * HZ / 1000000
*
* 31 is the biggest scale shift value that won't overflow 32 bits for
* delay_us * UDELAY_MULT assuming HZ <= 1000 and delay_us <= 2000.
*/
#define MAX_UDELAY_MS 2
#define UDELAY_MULT UL(2147 * HZ + 483648 * HZ / 1000000)
#define UDELAY_SHIFT 31
...
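To sanity-check the arithmetic described in the comment above, here is a small user-space sketch of the same fixed-point computation; the HZ and loops_per_jiffy values are purely illustrative, not taken from the patch:

```c
#include <stdint.h>
#include <stdio.h>

/* Stand-alone check of the documented fixed-point delay math.
 * HZ and loops_per_jiffy are illustrative values, not from the patch. */
#define HZ		100
#define UDELAY_MULT	(2147UL * HZ + 483648UL * HZ / 1000000)	/* ~2^31 * HZ / 10^6 */
#define UDELAY_SHIFT	31

static unsigned long loops_for_udelay(unsigned long loops_per_jiffy,
				      unsigned long delay_us)
{
	/* loops = (loops_per_jiffy * delay_us * UDELAY_MULT) >> 31 */
	uint64_t product = (uint64_t)(delay_us * UDELAY_MULT) * loops_per_jiffy;

	/* round up, roughly as the assembly version does */
	return (unsigned long)((product + (1ULL << UDELAY_SHIFT) - 1) >> UDELAY_SHIFT);
}

int main(void)
{
	/* 10us with loops_per_jiffy = 500000 and HZ = 100:
	 * exact value is 500000 * 100 * 10 / 10^6 = 500 loops */
	printf("%lu\n", loops_for_udelay(500000, 10));	/* prints 500 */
	return 0;
}
```

With these numbers the exact result is 500 loops, and the scaled multiply-and-shift reproduces it.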
#include <generated/mach-types.h>
...@@ -14,12 +14,7 @@
#define __ASM_ARM_UNISTD_H
#include <uapi/asm/unistd.h>
#include <asm/unistd-nr.h>
/*
* This may need to be greater than __NR_last_syscall+1 in order to
* account for the padding in the syscall table
*/
#define __NR_syscalls (400)
#define __ARCH_WANT_STAT64
#define __ARCH_WANT_SYS_GETHOSTNAME
...@@ -52,4 +47,23 @@
#define __IGNORE_fadvise64_64
#define __IGNORE_migrate_pages
#ifdef __ARM_EABI__
/*
* The following syscalls are obsolete and no longer available for EABI:
* __NR_time
* __NR_umount
* __NR_stime
* __NR_alarm
* __NR_utime
* __NR_getrlimit
* __NR_select
* __NR_readdir
* __NR_mmap
* __NR_socketcall
* __NR_syscall
* __NR_ipc
*/
#define __IGNORE_getrlimit
#endif
#endif /* __ASM_ARM_UNISTD_H */
...@@ -18,3 +18,6 @@ header-y += stat.h
header-y += statfs.h
header-y += swab.h
header-y += unistd.h
genhdr-y += unistd-common.h
genhdr-y += unistd-oabi.h
genhdr-y += unistd-eabi.h
This diff is collapsed.
This diff is collapsed.
...@@ -12,6 +12,11 @@
#include <asm/unistd.h>
#include <asm/ftrace.h>
#include <asm/unwind.h>
#ifdef CONFIG_AEABI
#include <asm/unistd-oabi.h>
#endif
.equ NR_syscalls, __NR_syscalls
#ifdef CONFIG_NEED_RET_TO_USER
#include <mach/entry-macro.S>
...@@ -120,21 +125,6 @@ ENTRY(ret_from_fork)
b ret_slow_syscall
ENDPROC(ret_from_fork)
.equ NR_syscalls,0
#define CALL(x) .equ NR_syscalls,NR_syscalls+1
#include "calls.S"
/*
* Ensure that the system call table is equal to __NR_syscalls,
* which is the value the rest of the system sees
*/
.ifne NR_syscalls - __NR_syscalls
.error "__NR_syscalls is not equal to the size of the syscall table"
.endif
#undef CALL
#define CALL(x) .long x
/*=============================================================================
* SWI handler
*-----------------------------------------------------------------------------
...@@ -291,22 +281,48 @@ __cr_alignment:
#endif
.ltorg
.macro syscall_table_start, sym
.equ __sys_nr, 0
.type \sym, #object
ENTRY(\sym)
.endm
.macro syscall, nr, func
.ifgt __sys_nr - \nr
.error "Duplicated/unordered system call entry"
.endif
.rept \nr - __sys_nr
.long sys_ni_syscall
.endr
.long \func
.equ __sys_nr, \nr + 1
.endm
.macro syscall_table_end, sym
.ifgt __sys_nr - __NR_syscalls
.error "System call table too big"
.endif
.rept __NR_syscalls - __sys_nr
.long sys_ni_syscall
.endr
.size \sym, . - \sym
.endm
#define NATIVE(nr, func) syscall nr, func
/*
* This is the syscall table declaration for native ABI syscalls.
* With EABI a couple syscalls are obsolete and defined as sys_ni_syscall.
*/
#define ABI(native, compat) native
syscall_table_start sys_call_table
#define COMPAT(nr, native, compat) syscall nr, native
#ifdef CONFIG_AEABI
#define OBSOLETE(syscall) sys_ni_syscall
#include <calls-eabi.S>
#else
#define OBSOLETE(syscall) syscall
#include <calls-oabi.S>
#endif
#undef COMPAT
.type sys_call_table, #object
syscall_table_end sys_call_table
ENTRY(sys_call_table)
#include "calls.S"
#undef ABI
#undef OBSOLETE
/*============================================================================
* Special system call wrappers
...@@ -407,14 +423,10 @@ ENDPROC(sys_oabi_readahead)
* Let's declare a second syscall table for old ABI binaries
* using the compatibility syscall entries.
*/
#define ABI(native, compat) compat
syscall_table_start sys_oabi_call_table
#define OBSOLETE(syscall) syscall
#define COMPAT(nr, native, compat) syscall nr, compat
#include <calls-oabi.S>
.type sys_oabi_call_table, #object
syscall_table_end sys_oabi_call_table
ENTRY(sys_oabi_call_table)
#include "calls.S"
#undef ABI
#undef OBSOLETE
#endif
...@@ -12,6 +12,7 @@
*/
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/init.h>
...@@ -21,7 +22,9 @@
#include <linux/of.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/topology.h>
...@@ -41,6 +44,7 @@
* updated during this sequence.
*/
static DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
static DEFINE_MUTEX(cpu_scale_mutex);
unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
{
...@@ -52,6 +56,65 @@ static void set_capacity_scale(unsigned int cpu, unsigned long capacity)
per_cpu(cpu_scale, cpu) = capacity;
}
#ifdef CONFIG_PROC_SYSCTL
static ssize_t cpu_capacity_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct cpu *cpu = container_of(dev, struct cpu, dev);
return sprintf(buf, "%lu\n",
arch_scale_cpu_capacity(NULL, cpu->dev.id));
}
static ssize_t cpu_capacity_store(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t count)
{
struct cpu *cpu = container_of(dev, struct cpu, dev);
int this_cpu = cpu->dev.id, i;
unsigned long new_capacity;
ssize_t ret;
if (count) {
ret = kstrtoul(buf, 0, &new_capacity);
if (ret)
return ret;
if (new_capacity > SCHED_CAPACITY_SCALE)
return -EINVAL;
mutex_lock(&cpu_scale_mutex);
for_each_cpu(i, &cpu_topology[this_cpu].core_sibling)
set_capacity_scale(i, new_capacity);
mutex_unlock(&cpu_scale_mutex);
}
return count;
}
static DEVICE_ATTR_RW(cpu_capacity);
static int register_cpu_capacity_sysctl(void)
{
int i;
struct device *cpu;
for_each_possible_cpu(i) {
cpu = get_cpu_device(i);
if (!cpu) {
pr_err("%s: too early to get CPU%d device!\n",
__func__, i);
continue;
}
device_create_file(cpu, &dev_attr_cpu_capacity);
}
return 0;
}
subsys_initcall(register_cpu_capacity_sysctl);
#endif
#ifdef CONFIG_OF
struct cpu_efficiency {
const char *compatible;
...@@ -78,6 +141,146 @@ static unsigned long *__cpu_capacity;
#define cpu_capacity(cpu) __cpu_capacity[cpu]
static unsigned long middle_capacity = 1;
static bool cap_from_dt = true;
static u32 *raw_capacity;
static bool cap_parsing_failed;
static u32 capacity_scale;
static int __init parse_cpu_capacity(struct device_node *cpu_node, int cpu)
{
int ret = 1;
u32 cpu_capacity;
if (cap_parsing_failed)
return !ret;
ret = of_property_read_u32(cpu_node,
"capacity-dmips-mhz",
&cpu_capacity);
if (!ret) {
if (!raw_capacity) {
raw_capacity = kcalloc(num_possible_cpus(),
sizeof(*raw_capacity),
GFP_KERNEL);
if (!raw_capacity) {
pr_err("cpu_capacity: failed to allocate memory for raw capacities\n");
cap_parsing_failed = true;
return !ret;
}
}
capacity_scale = max(cpu_capacity, capacity_scale);
raw_capacity[cpu] = cpu_capacity;
pr_debug("cpu_capacity: %s cpu_capacity=%u (raw)\n",
cpu_node->full_name, raw_capacity[cpu]);
} else {
if (raw_capacity) {
pr_err("cpu_capacity: missing %s raw capacity\n",
cpu_node->full_name);
pr_err("cpu_capacity: partial information: fallback to 1024 for all CPUs\n");
}
cap_parsing_failed = true;
kfree(raw_capacity);
}
return !ret;
}
static void normalize_cpu_capacity(void)
{
u64 capacity;
int cpu;
if (!raw_capacity || cap_parsing_failed)
return;
pr_debug("cpu_capacity: capacity_scale=%u\n", capacity_scale);
mutex_lock(&cpu_scale_mutex);
for_each_possible_cpu(cpu) {
capacity = (raw_capacity[cpu] << SCHED_CAPACITY_SHIFT)
/ capacity_scale;
set_capacity_scale(cpu, capacity);
pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n",
cpu, arch_scale_cpu_capacity(NULL, cpu));
}
mutex_unlock(&cpu_scale_mutex);
}
#ifdef CONFIG_CPU_FREQ
static cpumask_var_t cpus_to_visit;
static bool cap_parsing_done;
static void parsing_done_workfn(struct work_struct *work);
static DECLARE_WORK(parsing_done_work, parsing_done_workfn);
static int
init_cpu_capacity_callback(struct notifier_block *nb,
unsigned long val,
void *data)
{
struct cpufreq_policy *policy = data;
int cpu;
if (cap_parsing_failed || cap_parsing_done)
return 0;
switch (val) {
case CPUFREQ_NOTIFY:
pr_debug("cpu_capacity: init cpu capacity for CPUs [%*pbl] (to_visit=%*pbl)\n",
cpumask_pr_args(policy->related_cpus),
cpumask_pr_args(cpus_to_visit));
cpumask_andnot(cpus_to_visit,
cpus_to_visit,
policy->related_cpus);
for_each_cpu(cpu, policy->related_cpus) {
raw_capacity[cpu] = arch_scale_cpu_capacity(NULL, cpu) *
policy->cpuinfo.max_freq / 1000UL;
capacity_scale = max(raw_capacity[cpu], capacity_scale);
}
if (cpumask_empty(cpus_to_visit)) {
normalize_cpu_capacity();
kfree(raw_capacity);
pr_debug("cpu_capacity: parsing done\n");
cap_parsing_done = true;
schedule_work(&parsing_done_work);
}
}
return 0;
}
static struct notifier_block init_cpu_capacity_notifier = {
.notifier_call = init_cpu_capacity_callback,
};
static int __init register_cpufreq_notifier(void)
{
if (cap_parsing_failed)
return -EINVAL;
if (!alloc_cpumask_var(&cpus_to_visit, GFP_KERNEL)) {
pr_err("cpu_capacity: failed to allocate memory for cpus_to_visit\n");
return -ENOMEM;
}
cpumask_copy(cpus_to_visit, cpu_possible_mask);
return cpufreq_register_notifier(&init_cpu_capacity_notifier,
CPUFREQ_POLICY_NOTIFIER);
}
core_initcall(register_cpufreq_notifier);
static void parsing_done_workfn(struct work_struct *work)
{
cpufreq_unregister_notifier(&init_cpu_capacity_notifier,
CPUFREQ_POLICY_NOTIFIER);
}
#else
static int __init free_raw_capacity(void)
{
kfree(raw_capacity);
return 0;
}
core_initcall(free_raw_capacity);
#endif
/*
* Iterate all CPUs' descriptor in DT and compute the efficiency
...@@ -99,6 +302,12 @@ static void __init parse_dt_topology(void)
__cpu_capacity = kcalloc(nr_cpu_ids, sizeof(*__cpu_capacity),
GFP_NOWAIT);
cn = of_find_node_by_path("/cpus");
if (!cn) {
pr_err("No CPU information found in DT\n");
return;
}
for_each_possible_cpu(cpu) {
const u32 *rate;
int len;
...@@ -110,6 +319,13 @@
continue;
}
if (parse_cpu_capacity(cn, cpu)) {
of_node_put(cn);
continue;
}
cap_from_dt = false;
for (cpu_eff = table_efficiency; cpu_eff->compatible; cpu_eff++)
if (of_device_is_compatible(cn, cpu_eff->compatible))
break;
...@@ -151,6 +367,8 @@
middle_capacity = ((max_capacity / 3)
>> (SCHED_CAPACITY_SHIFT-1)) + 1;
if (cap_from_dt && !cap_parsing_failed)
normalize_cpu_capacity();
}
/*
...@@ -160,7 +378,7 @@ static void __init parse_dt_topology(void)
*/
static void update_cpu_capacity(unsigned int cpu)
{
if (!cpu_capacity(cpu))
if (!cpu_capacity(cpu) || cap_from_dt)
return;
set_capacity_scale(cpu, cpu_capacity(cpu) / middle_capacity);
...
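As a side note on normalize_cpu_capacity() above, a stand-alone sketch with made-up capacity-dmips-mhz values shows how the raw DT numbers end up in the 0..1024 cpu_scale range (the cpufreq notifier additionally folds each CPU's maximum frequency into the raw value before this step):

```c
#include <stdio.h>

#define SCHED_CAPACITY_SHIFT	10	/* 1 << 10 == 1024, as in the kernel */

/* Model of normalize_cpu_capacity(): raw capacity-dmips-mhz values are
 * scaled so that the largest one becomes 1024. The raw values below are
 * made-up example numbers for a 2+2 big.LITTLE system. */
int main(void)
{
	unsigned int raw_capacity[4] = { 4000, 4000, 1400, 1400 };
	unsigned int capacity_scale = 0;
	int cpu;

	for (cpu = 0; cpu < 4; cpu++)
		if (raw_capacity[cpu] > capacity_scale)
			capacity_scale = raw_capacity[cpu];

	for (cpu = 0; cpu < 4; cpu++) {
		unsigned long capacity =
			((unsigned long)raw_capacity[cpu] << SCHED_CAPACITY_SHIFT)
			/ capacity_scale;
		printf("cpu%d cpu_capacity=%lu\n", cpu, capacity);
		/* prints 1024, 1024, 358, 358 */
	}
	return 0;
}
```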
...@@ -17,24 +17,23 @@
.LC1: .word UDELAY_MULT
/*
* loops = r0 * HZ * loops_per_jiffy / 1000000
*
* r0 <= 2000
* HZ <= 1000
*/
ENTRY(__loop_udelay)
ldr r2, .LC1
mul r0, r2, r0 @ r0 = delay_us * UDELAY_MULT
ENTRY(__loop_const_udelay) @ 0 <= r0 <= 0x7fffff06
ENTRY(__loop_const_udelay) @ 0 <= r0 <= 0xfffffaf0
ldr r2, .LC0
ldr r2, [r2]
umull r1, r0, r2, r0 @ r0-r1 = r0 * loops_per_jiffy
adds r1, r1, #0xffffffff @ rounding up ...
adcs r0, r0, r0 @ and right shift by 31
reteq lr
/*
* loops = r0 * HZ * loops_per_jiffy / 1000000
*/
.align 3
@ Delay routine
...
This diff is collapsed.
...@@ -52,8 +52,4 @@
#include "SA-1100.h"
#ifdef CONFIG_SA1101
#include "SA-1101.h"
#endif
#endif /* _ASM_ARCH_HARDWARE_H */
...@@ -991,7 +991,7 @@ config CACHE_TAUROS2
config CACHE_UNIPHIER
bool "Enable the UniPhier outer cache controller"
depends on ARCH_UNIPHIER
default y
select ARM_L1_CACHE_SHIFT_7
select OUTER_CACHE
select OUTER_CACHE_SYNC
help
...@@ -1012,8 +1012,14 @@ config ARM_L1_CACHE_SHIFT_6
help
Setting ARM L1 cache line size to 64 Bytes.
config ARM_L1_CACHE_SHIFT_7
bool
help
Setting ARM L1 cache line size to 128 Bytes.
config ARM_L1_CACHE_SHIFT
int
default 7 if ARM_L1_CACHE_SHIFT_7
default 6 if ARM_L1_CACHE_SHIFT_6
default 5
...
...@@ -34,28 +34,29 @@ static int change_page_range(pte_t *ptep, pgtable_t token, unsigned long addr,
return 0;
}
static bool in_range(unsigned long start, unsigned long size,
unsigned long range_start, unsigned long range_end)
{
return start >= range_start && start < range_end &&
size <= range_end - start;
}
static int change_memory_common(unsigned long addr, int numpages,
pgprot_t set_mask, pgprot_t clear_mask)
{
unsigned long start = addr;
unsigned long start = addr & PAGE_MASK;
unsigned long size = PAGE_SIZE*numpages;
unsigned long end = PAGE_ALIGN(addr) + numpages * PAGE_SIZE;
unsigned long end = start + size;
unsigned long size = end - start;
int ret;
struct page_change_data data;
if (!IS_ALIGNED(addr, PAGE_SIZE)) {
WARN_ON_ONCE(start != addr);
start &= PAGE_MASK;
end = start + size;
WARN_ON_ONCE(1);
}
if (!numpages)
if (!size)
return 0;
if (start < MODULES_VADDR || start >= MODULES_END)
if (!in_range(start, size, MODULES_VADDR, MODULES_END) &&
return -EINVAL;
!in_range(start, size, VMALLOC_START, VMALLOC_END))
if (end < MODULES_VADDR || start >= MODULES_END)
return -EINVAL;
data.set_mask = set_mask;
...
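The new in_range() helper above replaces the old two-comparison bounds check; a small stand-alone model (the region bounds are made-up example values) shows why expressing the check as size <= range_end - start also rejects sizes that would make start + size wrap around:

```c
#include <assert.h>
#include <stdbool.h>

/* Stand-alone model of the new in_range() check, using invented region
 * bounds. Once start is known to lie inside the range, comparing the size
 * against range_end - start cannot overflow, unlike adding start + size. */
static bool in_range(unsigned long start, unsigned long size,
		     unsigned long range_start, unsigned long range_end)
{
	return start >= range_start && start < range_end &&
	       size <= range_end - start;
}

int main(void)
{
	const unsigned long MODULES_VADDR = 0x7f000000UL;	/* example value */
	const unsigned long MODULES_END   = 0x7fe00000UL;	/* example value */

	/* fits entirely inside the region: accepted */
	assert(in_range(MODULES_VADDR, 0x1000, MODULES_VADDR, MODULES_END));
	/* spans past the end of the region: rejected */
	assert(!in_range(MODULES_END - 0x1000, 0x2000, MODULES_VADDR, MODULES_END));
	/* size so large that start + size would wrap: still rejected */
	assert(!in_range(MODULES_VADDR, ~0UL, MODULES_VADDR, MODULES_END));
	return 0;
}
```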
...@@ -4,10 +4,76 @@
# Copyright (C) 2001 Russell King
#
gen := arch/$(ARCH)/include/generated
kapi := $(gen)/asm
uapi := $(gen)/uapi/asm
syshdr := $(srctree)/$(src)/syscallhdr.sh
sysnr := $(srctree)/$(src)/syscallnr.sh
systbl := $(srctree)/$(src)/syscalltbl.sh
syscall := $(srctree)/$(src)/syscall.tbl
gen-y := $(gen)/calls-oabi.S
gen-y += $(gen)/calls-eabi.S
kapi-hdrs-y := $(kapi)/unistd-nr.h
kapi-hdrs-y += $(kapi)/mach-types.h
uapi-hdrs-y := $(uapi)/unistd-common.h
uapi-hdrs-y += $(uapi)/unistd-oabi.h
uapi-hdrs-y += $(uapi)/unistd-eabi.h
targets += $(addprefix ../../../,$(gen-y) $(kapi-hdrs-y) $(uapi-hdrs-y))
PHONY += kapi uapi
kapi: $(kapi-hdrs-y) $(gen-y)
uapi: $(uapi-hdrs-y)
# Create output directory if not already present
_dummy := $(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)') \
$(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)')
quiet_cmd_gen_mach = GEN $@
cmd_gen_mach = mkdir -p $(dir $@) && \
$(AWK) -f $(filter-out $(PHONY),$^) > $@ || \
{ rm -f $@; /bin/false; }
include/generated/mach-types.h: $(src)/gen-mach-types $(src)/mach-types FORCE
$(kapi)/mach-types.h: $(src)/gen-mach-types $(src)/mach-types FORCE
$(call if_changed,gen_mach)
quiet_cmd_syshdr = SYSHDR $@
cmd_syshdr = $(CONFIG_SHELL) '$(syshdr)' '$<' '$@' \
'$(syshdr_abi_$(basetarget))' \
'$(syshdr_pfx_$(basetarget))' \
'__NR_SYSCALL_BASE'
quiet_cmd_systbl = SYSTBL $@
cmd_systbl = $(CONFIG_SHELL) '$(systbl)' '$<' '$@' \
'$(systbl_abi_$(basetarget))'
quiet_cmd_sysnr = SYSNR $@
cmd_sysnr = $(CONFIG_SHELL) '$(sysnr)' '$<' '$@' \
'$(syshdr_abi_$(basetarget))'
syshdr_abi_unistd-common := common
$(uapi)/unistd-common.h: $(syscall) $(syshdr) FORCE
$(call if_changed,syshdr)
syshdr_abi_unistd-oabi := oabi
$(uapi)/unistd-oabi.h: $(syscall) $(syshdr) FORCE
$(call if_changed,syshdr)
syshdr_abi_unistd-eabi := eabi
$(uapi)/unistd-eabi.h: $(syscall) $(syshdr) FORCE
$(call if_changed,syshdr)
sysnr_abi_unistd-nr := common,oabi,eabi,compat
$(kapi)/unistd-nr.h: $(syscall) $(sysnr) FORCE
$(call if_changed,sysnr)
systbl_abi_calls-oabi := common,oabi
$(gen)/calls-oabi.S: $(syscall) $(systbl) FORCE
$(call if_changed,systbl)
systbl_abi_calls-eabi := common,eabi
$(gen)/calls-eabi.S: $(syscall) $(systbl) FORCE
$(call if_changed,systbl)
This diff is collapsed.
This diff is collapsed.
#!/bin/sh
in="$1"
out="$2"
my_abis=`echo "($3)" | tr ',' '|'`
prefix="$4"
offset="$5"
fileguard=_ASM_ARM_`basename "$out" | sed \
-e 'y/abcdefghijklmnopqrstuvwxyz/ABCDEFGHIJKLMNOPQRSTUVWXYZ/' \
-e 's/[^A-Z0-9_]/_/g' -e 's/__/_/g'`
if echo $out | grep -q uapi; then
fileguard="_UAPI$fileguard"
fi
grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | (
echo "#ifndef ${fileguard}"
echo "#define ${fileguard} 1"
echo ""
while read nr abi name entry ; do
if [ -z "$offset" ]; then
echo "#define __NR_${prefix}${name} $nr"
else
echo "#define __NR_${prefix}${name} ($offset + $nr)"
fi
done
echo ""
echo "#endif /* ${fileguard} */"
) > "$out"
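For illustration, assuming syscall.tbl begins with entries such as `0 common restart_syscall sys_restart_syscall`, the script above would emit a uapi header along these lines for the "common" ABI (hypothetical excerpt, not the full generated file):

```c
#ifndef _UAPI_ASM_ARM_UNISTD_COMMON_H
#define _UAPI_ASM_ARM_UNISTD_COMMON_H 1

#define __NR_restart_syscall (__NR_SYSCALL_BASE + 0)
#define __NR_exit (__NR_SYSCALL_BASE + 1)
#define __NR_fork (__NR_SYSCALL_BASE + 2)
/* ... one #define per syscall listed in syscall.tbl ... */

#endif /* _UAPI_ASM_ARM_UNISTD_COMMON_H */
```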
#!/bin/sh
in="$1"
out="$2"
my_abis=`echo "($3)" | tr ',' '|'`
align=1
fileguard=_ASM_ARM_`basename "$out" | sed \
-e 'y/abcdefghijklmnopqrstuvwxyz/ABCDEFGHIJKLMNOPQRSTUVWXYZ/' \
-e 's/[^A-Z0-9_]/_/g' -e 's/__/_/g'`
grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | tail -n1 | (
echo "#ifndef ${fileguard}
#define ${fileguard} 1
/*
* This needs to be greater than __NR_last_syscall+1 in order to account
* for the padding in the syscall table.
*/
"
while read nr abi name entry; do
nr=$(($nr + 1))
while [ "$(($nr / (256 * $align) ))" -gt 0 ]; do
align=$(( $align * 4 ))
done
nr=$(( ($nr + $align - 1) & ~($align - 1) ))
echo "/* aligned to $align */"
echo "#define __NR_syscalls $nr"
done
echo ""
echo "#endif /* ${fileguard} */"
) > "$out"
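To see what this rounding produces, here is a small C model of the same calculation; the last-syscall number used in the example is an assumption about the table at the time of this merge, not a value taken from the diff:

```c
#include <stdio.h>

/* Model of the rounding done by syscallnr.sh: take the highest syscall
 * number, add one, then round up to an alignment that grows with the table
 * size (4 once nr reaches 256, 16 once it reaches 1024, and so on). */
static unsigned int nr_syscalls(unsigned int last_syscall)
{
	unsigned int nr = last_syscall + 1;
	unsigned int align = 1;

	while (nr / (256 * align) > 0)
		align *= 4;

	return (nr + align - 1) & ~(align - 1);
}

int main(void)
{
	/* Assuming the highest entry in syscall.tbl is 396 at this point,
	 * the result is 400 -- matching the old hand-maintained
	 * "#define __NR_syscalls (400)" that this series removes. */
	printf("%u\n", nr_syscalls(396));	/* prints 400 */
	return 0;
}
```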
#!/bin/sh
in="$1"
out="$2"
my_abis=`echo "($3)" | tr ',' '|'`
grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | (
while read nr abi name entry compat; do
if [ "$abi" = "eabi" -a -n "$compat" ]; then
echo "$in: error: a compat entry for an EABI syscall ($name) makes no sense" >&2
exit 1
fi
if [ -n "$entry" ]; then
if [ -z "$compat" ]; then
echo "NATIVE($nr, $entry)"
else
echo "COMPAT($nr, $entry, $compat)"
fi
fi
done
) > "$out"
...@@ -155,8 +155,8 @@ struct vfp_single {
u32 significand;
};
extern s32 vfp_get_float(unsigned int reg);
asmlinkage s32 vfp_get_float(unsigned int reg);
extern void vfp_put_float(s32 val, unsigned int reg);
asmlinkage void vfp_put_float(s32 val, unsigned int reg);
/*
* VFP_SINGLE_MANTISSA_BITS - number of bits in the mantissa
...@@ -270,8 +270,8 @@ struct vfp_double {
#else
#define VFP_REG_ZERO 16
#endif
extern u64 vfp_get_double(unsigned int reg);
asmlinkage u64 vfp_get_double(unsigned int reg);
extern void vfp_put_double(u64 val, unsigned int reg);
asmlinkage void vfp_put_double(u64 val, unsigned int reg);
#define VFP_DOUBLE_MANTISSA_BITS (52)
#define VFP_DOUBLE_EXPONENT_BITS (11)
...@@ -377,4 +377,4 @@ struct op {
u32 flags;
};
extern void vfp_save_state(void *location, u32 fpexc);
asmlinkage void vfp_save_state(void *location, u32 fpexc);
...@@ -34,11 +34,11 @@
/*
* Our undef handlers (in entry.S)
*/
void vfp_testing_entry(void);
asmlinkage void vfp_testing_entry(void);
void vfp_support_entry(void);
asmlinkage void vfp_support_entry(void);
void vfp_null_entry(void);
asmlinkage void vfp_null_entry(void);
void (*vfp_vector)(void) = vfp_null_entry;
asmlinkage void (*vfp_vector)(void) = vfp_null_entry;
/*
* Dual-use variable.
...
...@@ -448,12 +448,20 @@ EXPORT_SYMBOL(clk_register_clkdev);
*
* con_id or dev_id may be NULL as a wildcard, just as in the rest of
* clkdev.
*
* To make things easier for mass registration, we detect error clk_hws
* from a previous clk_hw_register_*() call, and return the error code for
* those. This is to permit this function to be called immediately
* after clk_hw_register_*().
*/ */
int clk_hw_register_clkdev(struct clk_hw *hw, const char *con_id,
const char *dev_id)
{
struct clk_lookup *cl;
if (IS_ERR(hw))
return PTR_ERR(hw);
/*
* Since dev_id can be NULL, and NULL is handled specially, we must
* pass it as either a NULL format string, or with "%s".
...
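A hypothetical driver-side sketch of the mass-registration pattern this change enables (the clock names, rates and device ids are invented for illustration):

```c
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/device.h>

/* Because clk_hw_register_clkdev() now returns the error carried by an
 * ERR_PTR clk_hw, the result of clk_hw_register_fixed_rate() can be passed
 * straight in without a separate IS_ERR() check per clock. */
static int example_register_clocks(struct device *dev)
{
	int ret;

	ret = clk_hw_register_clkdev(
		clk_hw_register_fixed_rate(dev, "ref24m", NULL, 0, 24000000),
		"ref", "example-uart.0");
	if (ret)
		return ret;

	return clk_hw_register_clkdev(
		clk_hw_register_fixed_rate(dev, "ref32k", NULL, 0, 32768),
		"ref", "example-rtc.0");
}
```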