Commit 63038e5f authored by Russell King

Miscellaneous compiler warning fixes, other small fixes and cleanups for ARM.
parent d1aa641f
......@@ -18,14 +18,14 @@
* Please select one of the following when turning on debugging.
*/
#ifdef DEBUG
#if 0 /* DC21285-type */
#if defined(CONFIG_DEBUG_DC21285_PORT)
.macro loadsp, rb
mov \rb, #0x7c000000
.endm
.macro writeb, rb
strb \rb, [r3, #0x3f8]
.endm
#elif 0 /* RiscPC-type */
#elif defined(CONFIG_ARCH_RPC)
.macro loadsp, rb
mov \rb, #0x03000000
orr \rb, \rb, #0x00010000
......@@ -33,7 +33,7 @@
.macro writeb, rb
strb \rb, [r3, #0x3f8 << 2]
.endm
#elif 0 /* integrator-type */
#elif defined(CONFIG_ARCH_INTEGRATOR)
.macro loadsp, rb
mov \rb, #0x16000000
.endm
......
......@@ -694,7 +694,6 @@ dep_bool ' Verbose kernel error messages' CONFIG_DEBUG_ERRORS $CONFIG_DEBUG_KER
dep_bool ' Kernel low-level debugging functions' CONFIG_DEBUG_LL $CONFIG_DEBUG_KERNEL
dep_bool ' Kernel low-level debugging messages via footbridge serial port' CONFIG_DEBUG_DC21285_PORT $CONFIG_DEBUG_LL $CONFIG_FOOTBRIDGE
dep_bool ' Kernel low-level debugging messages via UART2' CONFIG_DEBUG_CLPS711X_UART2 $CONFIG_DEBUG_LL $CONFIG_ARCH_CLPS711X
dep_bool ' Kernel low-level debugging messages via SA1100 Ser3 (otherwise Ser1)' CONFIG_DEBUG_LL_SER3 $CONFIG_DEBUG_LL $CONFIG_ARCH_SA1100
endmenu
source lib/Config.in
......@@ -385,6 +385,55 @@
bne 1001b
.endm
#elif defined(CONFIG_ARCH_IQ80310)
.macro addruart,rx
mov \rx, #0xfe000000 @ physical
orr \rx, \rx, #0x00810000
.endm
.macro senduart,rd,rx
strb \rd, [\rx]
.endm
.macro busyuart,rd,rx
1002: ldrb \rd, [\rx, #0x5]
and \rd, \rd, #0x60
teq \rd, #0x60
bne 1002b
.endm
.macro waituart,rd,rx
1001: ldrb \rd, [\rx, #0x6]
tst \rd, #0x10
beq 1001b
.endm
#elif defined(CONFIG_ARCH_ADI_EVB)
.macro addruart,rx
mrc p15, 0, \rx, c1, c0
tst \rx, #1 @ MMU enabled?
mov \rx, #0x00400000 @ physical base address
orrne \rx, \rx, #0xff000000 @ virtual base
.endm
.macro senduart,rd,rx
strb \rd, [\rx]
.endm
.macro busyuart,rd,rx
1002: ldrb \rd, [\rx, #0x5]
and \rd, \rd, #0x60
teq \rd, #0x60
bne 1002b
.endm
.macro waituart,rd,rx
1001: ldrb \rd, [\rx, #0x6]
tst \rd, #0x10
beq 1001b
.endm
#else
#error Unknown architecture
#endif
......
......@@ -6,7 +6,6 @@
#include <asm/errno.h>
#include <asm/hardware.h>
#include <asm/arch/irqs.h>
#include <asm/proc-fns.h>
#ifndef MODE_SVC
#define MODE_SVC 0x13
......
......@@ -27,6 +27,7 @@
#include <asm/atomic.h>
#include <asm/io.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/uaccess.h>
......@@ -335,6 +336,25 @@ static int bad_syscall(int n, struct pt_regs *regs)
return regs->ARM_r0;
}
static inline void
do_cache_op(unsigned long start, unsigned long end, int flags)
{
struct vm_area_struct *vma;
if (end < start)
return;
vma = find_vma(current->active_mm, start);
if (vma && vma->vm_start < end) {
if (start < vma->vm_start)
start = vma->vm_start;
if (end > vma->vm_end)
end = vma->vm_end;
flush_cache_range(vma, start, end);
}
}
/*
* Handle all unrecognised system calls.
* 0x9f0000 - 0x9fffff are some more esoteric system calls
......@@ -392,7 +412,7 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
* the specified region).
*/
case NR(cacheflush):
cpu_cache_clean_invalidate_range(regs->ARM_r0, regs->ARM_r1, 1);
do_cache_op(regs->ARM_r0, regs->ARM_r1, regs->ARM_r2);
return 0;
case NR(usr26):
......
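For context on the traps.c hunk above: the ARM-private cacheflush syscall is what user space (JIT compilers, self-modifying code) uses to push newly written instructions out of the data cache and invalidate the instruction cache over a region. A minimal user-space sketch, assuming __ARM_NR_cacheflush from <asm/unistd.h> and glibc's syscall(); the helper name sync_code_region is illustrative only:

	#include <unistd.h>
	#include <sys/syscall.h>
	#include <asm/unistd.h>         /* __ARM_NR_cacheflush */

	/* Make code written to [start, end) visible to instruction fetch.
	 * r0 = start, r1 = end, r2 = flags (currently 0); the kernel
	 * dispatches this via arm_syscall() and, after this commit,
	 * do_cache_op(). */
	static void sync_code_region(void *start, void *end)
	{
	        syscall(__ARM_NR_cacheflush, start, end, 0);
	}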
......@@ -62,7 +62,7 @@ static u_long iq80310_read_timer (void)
*/
static unsigned long iq80310_gettimeoffset (void)
{
unsigned long elapsed, usec, tmp1;
unsigned long elapsed, usec;
unsigned int stat1, stat2;
stat1 = *(volatile u8 *)IQ80310_INT_STAT;
......
......@@ -399,24 +399,6 @@ static u_int h3600_uart_get_mctrl(struct uart_port *port)
return ret;
}
static void h3600_dcd_intr(int irq, void *dev_id, struct pt_regs *regs)
{
struct uart_info *info = dev_id;
/* Note: should only call this if something has changed */
spin_lock_irq(&info->lock);
uart_handle_dcd_change(info, !(GPLR & GPIO_H3600_COM_DCD));
spin_unlock_irq(&info->lock);
}
static void h3600_cts_intr(int irq, void *dev_id, struct pt_regs *regs)
{
struct uart_info *info = dev_id;
/* Note: should only call this if something has changed */
spin_lock_irq(&info->lock);
uart_handle_cts_change(info, !(GPLR & GPIO_H3600_COM_CTS));
spin_unlock_irq(&info->lock);
}
static void h3600_uart_pm(struct uart_port *port, u_int state, u_int oldstate)
{
if (port->mapbase == _Ser2UTCR0) {
......@@ -444,47 +426,11 @@ static int h3600_uart_set_wake(struct uart_port *port, u_int enable)
return err;
}
static int h3600_uart_open(struct uart_port *port, struct uart_info *info)
{
int ret = 0;
if (port->mapbase == _Ser2UTCR0) {
Ser2UTCR4 = UTCR4_HSE;
Ser2HSCR0 = 0;
Ser2HSSR0 = HSSR0_EIF | HSSR0_TUR |
HSSR0_RAB | HSSR0_FRE;
} else if (port->mapbase == _Ser3UTCR0) {
set_GPIO_IRQ_edge(GPIO_H3600_COM_DCD|GPIO_H3600_COM_CTS,
GPIO_BOTH_EDGES);
ret = request_irq(IRQ_GPIO_H3600_COM_DCD, h3600_dcd_intr,
0, "RS232 DCD", info);
if (ret)
return ret;
ret = request_irq(IRQ_GPIO_H3600_COM_CTS, h3600_cts_intr,
0, "RS232 CTS", info);
if (ret)
free_irq(IRQ_GPIO_H3600_COM_DCD, info);
}
return ret;
}
static void h3600_uart_close(struct uart_port *port, struct uart_info *info)
{
if (port->mapbase == _Ser3UTCR0) {
free_irq(IRQ_GPIO_H3600_COM_DCD, info);
free_irq(IRQ_GPIO_H3600_COM_CTS, info);
}
}
static struct sa1100_port_fns h3600_port_fns __initdata = {
set_mctrl: h3600_uart_set_mctrl,
get_mctrl: h3600_uart_get_mctrl,
pm: h3600_uart_pm,
set_wake: h3600_uart_set_wake,
open: h3600_uart_open,
close: h3600_uart_close,
};
static struct map_desc h3600_io_desc[] __initdata = {
......
......@@ -243,10 +243,8 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
struct page *page = pte_page(pte);
if (VALID_PAGE(page) && page->mapping) {
if (test_and_clear_bit(PG_dcache_dirty, &page->flags)) {
unsigned long kvirt = (unsigned long)page_address(page);
cpu_cache_clean_invalidate_range(kvirt, kvirt + PAGE_SIZE, 0);
}
if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
__flush_dcache_page(page);
make_coherent(vma, addr, page);
}
......
......@@ -60,10 +60,6 @@ __setup("nowb", nowrite_setup);
#define FIRST_KERNEL_PGD_NR (FIRST_USER_PGD_NR + USER_PTRS_PER_PGD)
#define clean_cache_area(start,size) \
cpu_cache_clean_invalidate_range((unsigned long)start, ((unsigned long)start) + size, 0);
/*
* need to get a 16k page for level 1
*/
......@@ -114,10 +110,7 @@ pgd_t *get_pgd_slow(struct mm_struct *mm)
memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR,
(PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t));
/*
* FIXME: this should not be necessary
*/
clean_cache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));
clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));
return new_pgd;
......
/*
* We need constants.h for:
* VMA_VM_MM
* VMA_VM_FLAGS
* VM_EXEC
*/
#include <asm/constants.h>
#include <asm/thread_info.h>
/*
......
......@@ -79,10 +79,12 @@ ENTRY(v3_flush_kern_tlb_page)
mcr p15, 0, r0, c6, c0, 0 @ invalidate TLB entry
mov pc, lr
.section ".text.init", #alloc, #execinstr
ENTRY(v3_tlb_fns)
.word v3_flush_kern_tlb_all
.word v3_flush_user_tlb_mm
.word v3_flush_user_tlb_range
.word v3_flush_user_tlb_page
.word v3_flush_kern_tlb_page
.long v3_flush_kern_tlb_all
.long v3_flush_user_tlb_mm
.long v3_flush_user_tlb_range
.long v3_flush_user_tlb_page
.long v3_flush_kern_tlb_page
......@@ -98,9 +98,11 @@ ENTRY(v4_flush_kern_tlb_page)
mcr p15, 0, r0, c8, c6, 1 @ invalidate D TLB entry
mov pc, lr
.section ".text.init", #alloc, #execinstr
ENTRY(v4_tlb_fns)
.word v4_flush_kern_tlb_all
.word v4_flush_user_tlb_mm
.word v4_flush_user_tlb_range
.word v4_flush_user_tlb_page
.word v4_flush_kern_tlb_page
.long v4_flush_kern_tlb_all
.long v4_flush_user_tlb_mm
.long v4_flush_user_tlb_range
.long v4_flush_user_tlb_page
.long v4_flush_kern_tlb_page
......@@ -144,17 +144,19 @@ ENTRY(v4wbi_flush_kern_tlb_page)
mcr p15, 0, r0, c8, c6, 1 @ invalidate D TLB entry
mov pc, lr
.section ".text.init", #alloc, #execinstr
ENTRY(v4wb_tlb_fns)
.word v4wb_flush_kern_tlb_all
.word v4wb_flush_user_tlb_mm
.word v4wb_flush_user_tlb_range
.word v4wb_flush_user_tlb_page
.word v4wb_flush_kern_tlb_page
.long v4wb_flush_kern_tlb_all
.long v4wb_flush_user_tlb_mm
.long v4wb_flush_user_tlb_range
.long v4wb_flush_user_tlb_page
.long v4wb_flush_kern_tlb_page
ENTRY(v4wbi_tlb_fns)
.word v4wbi_flush_kern_tlb_all
.word v4wbi_flush_user_tlb_mm
.word v4wbi_flush_user_tlb_range
.word v4wbi_flush_user_tlb_page
.word v4wbi_flush_kern_tlb_page
.long v4wbi_flush_kern_tlb_all
.long v4wbi_flush_user_tlb_mm
.long v4wbi_flush_user_tlb_range
.long v4wbi_flush_user_tlb_page
.long v4wbi_flush_kern_tlb_page
......@@ -6,7 +6,7 @@
# To add an entry into this database, please see Documentation/arm/README,
# or contact rmk@arm.linux.org.uk
#
# Last update: Sun Feb 24 17:43:42 2002
# Last update: Fri Mar 8 20:08:02 2002
#
# machine_is_xxx CONFIG_xxxx MACH_TYPE_xxx number
#
......@@ -147,7 +147,7 @@ whitechapel SA1100_WHITECHAPEL WHITECHAPEL 135
h3100 SA1100_H3100 H3100 136
h3800 SA1100_H3800 H3800 137
blue_v1 ARCH_BLUE_V1 BLUE_V1 138
xscale_cerf ARCH_XSCALE_CERF XSCALE_CERF 139
pxa_cerf ARCH_PXA_CERF PXA_CERF 139
arm7tevb ARCH_ARM7TEVB ARM7TEVB 140
d7400 ARCH_D7400 D7400 141
piranha ARCH_PIRANHA PIRANHA 142
......@@ -174,3 +174,5 @@ h7202 ARCH_H7202 H7202 162
amico ARCH_AMICO AMICO 163
iam SA1100_IAM IAM 164
tt530 SA1100_TT530 TT530 165
sam2400 ARCH_SAM2400 SAM2400 166
jornada56x ARCH_JORNADA56X JORNADA56X 167
......@@ -19,6 +19,8 @@
#define NO_IRQ ((unsigned int)(-1))
#endif
struct irqaction;
#define disable_irq_nosync(i) disable_irq(i)
extern void disable_irq(unsigned int);
......@@ -38,5 +40,7 @@ extern void enable_irq(unsigned int);
int set_irq_type(unsigned int irq, unsigned int type);
int setup_irq(unsigned int, struct irqaction *);
#endif
......@@ -75,7 +75,6 @@ extern void (*init_arch_irq)(void);
extern void init_FIQ(void);
extern int show_fiq_list(struct seq_file *, void *);
void __set_irq_handler(unsigned int irq, irq_handler_t, int);
int setup_irq(unsigned int, struct irqaction *);
/*
* External stuff.
......
......@@ -61,6 +61,7 @@ u8 pci_std_swizzle(struct pci_dev *dev, u8 *pinp);
*/
extern int iop310_setup(int nr, struct pci_sys_data *);
extern struct pci_bus *iop310_scan_bus(int nr, struct pci_sys_data *);
extern void iop310_init(void);
extern int dc21285_setup(int nr, struct pci_sys_data *);
extern struct pci_bus *dc21285_scan_bus(int nr, struct pci_sys_data *);
......@@ -75,4 +76,3 @@ extern int pci_v3_setup(int nr, struct pci_sys_data *);
extern struct pci_bus *pci_v3_scan_bus(int nr, struct pci_sys_data *);
extern void pci_v3_preinit(void);
extern void pci_v3_postinit(void);
......@@ -26,13 +26,10 @@ struct sa1100_port_fns {
void (*close)(struct uart_port *, struct uart_info *);
};
#if defined(CONFIG_SERIAL_SA1100) && !defined(CONFIG_SERIAL_SA1100_OLD)
#ifdef CONFIG_SERIAL_SA1100
void sa1100_register_uart_fns(struct sa1100_port_fns *fns);
void sa1100_register_uart(int idx, int port);
#else
#define sa1100_register_uart_fns(fns) do { } while (0)
#define sa1100_register_uart(idx,port) do { } while (0)
#endif
void sa1100_uart1_altgpio(void);
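As a hedged sketch of how the declarations above are consumed: a board's map_io()-style setup typically registers its sa1100_port_fns table (h3600_port_fns earlier in this diff is a real instance) and then maps logical serial lines to on-chip UARTs. The header path and the example_* names here are assumptions for illustration:

	#include <asm/mach/serial_sa1100.h>     /* assumed location of the hooks above */

	static void __init example_map_io(void)
	{
	        sa1100_register_uart_fns(&example_port_fns);    /* placeholder fns table */
	        sa1100_register_uart(0, 3);     /* ttySA0 <-> Ser3 */
	        sa1100_register_uart(1, 1);     /* ttySA1 <-> Ser1 */
	}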
......@@ -58,74 +58,6 @@
} \
} while (0)
/*
* This flushes back any buffered write data. We have to clean the entries
* in the cache for this page. This does not invalidate either I or D caches.
*
* Called from:
* 1. fs/exec.c:put_dirty_page - ok
* - page came from alloc_page(), so page->mapping = NULL.
* - flush_dcache_page called immediately prior.
*
* 2. kernel/ptrace.c:access_one_page - flush_icache_page
* - flush_cache_page takes care of the user space side of the mapping.
* - page is either a page cache page (with page->mapping set, and
* hence page->mapping->i_mmap{,shared} also set) or an anonymous
* page. I think this is ok.
*
* 3. kernel/ptrace.c:access_one_page - bad
* - flush_cache_page takes care of the user space side of the mapping.
* - no apparant cache protection, reading the kernel virtual alias
*
* 4. mm/filemap.c:filemap_no_page - ok
* - add_to_page_cache_* clears PG_arch_1.
* - page->mapping != NULL.
* - i_mmap or i_mmap_shared will be non-null if mmap'd
* - called from (8).
*
* 5. mm/memory.c:break_cow,do_wp_page - {copy,clear}_user_page
* - need to ensure that copy_cow_page has pushed all data from the dcache
* to the page.
* - calls
* - clear_user_highpage -> clear_user_page
* - copy_user_highpage -> copy_user_page
*
* 6. mm/memory.c:do_swap_page - flush_icache_page
* - flush_icache_page called afterwards - if flush_icache_page does the
* same as flush_dcache_page, update_mmu_cache will do the work for us.
* - update_mmu_cache called.
*
* 7. mm/memory.c:do_anonymous_page - {copy,clear}_user_page
* - calls clear_user_highpage. See (5)
*
* 8. mm/memory.c:do_no_page - flush_icache_page
* - flush_icache_page called afterwards - if flush_icache_page does the
* same as flush_dcache_page, update_mmu_cache will do the work for us.
* - update_mmu_cache called.
* - When we place a user mapping, we will call update_mmu_cache,
* which will catch PG_arch_1 set.
*
* 9. mm/shmem.c:shmem_no_page - ok
* - shmem_get_page clears PG_arch_1, as does add_to_page_cache (duplicate)
* - page->mapping != NULL.
* - i_mmap or i_mmap_shared will be non-null if mmap'd
* - called from (8).
*
* 10. mm/swapfile.c:try_to_unuse - bad
* - this looks really dodgy - we're putting pages from the swap cache
* straight into processes, and the only cache handling appears to
* be flush_page_to_ram.
*/
#define flush_page_to_ram_ok
#ifdef flush_page_to_ram_ok
#define flush_page_to_ram(page) do { } while (0)
#else
static __inline__ void flush_page_to_ram(struct page *page)
{
cpu_flush_ram_page(page_address(page));
}
#endif
/*
* D cache only
*/
......@@ -134,8 +66,16 @@ static __inline__ void flush_page_to_ram(struct page *page)
#define clean_dcache_range(_s,_e) cpu_dcache_clean_range((_s),(_e))
#define flush_dcache_range(_s,_e) cpu_cache_clean_invalidate_range((_s),(_e),0)
#define mapping_mapped(map) (!list_empty(&(map)->i_mmap) || \
!list_empty(&(map)->i_mmap_shared))
#define clean_dcache_area(start,size) \
cpu_cache_clean_invalidate_range((unsigned long)start, \
((unsigned long)start) + size, 0);
/*
* This is an obsolete interface; the functionality that was provided by this
* function is now merged into our flush_dcache_page, flush_icache_page,
* copy_user_page and clear_user_page functions.
*/
#define flush_page_to_ram(page) do { } while (0)
/*
* flush_dcache_page is used when the kernel has written to the page
......@@ -150,39 +90,31 @@ static __inline__ void flush_page_to_ram(struct page *page)
* about to change to user space. This is the same method as used on SPARC64.
* See update_mmu_cache for the user space part.
*/
#define mapping_mapped(map) (!list_empty(&(map)->i_mmap) || \
!list_empty(&(map)->i_mmap_shared))
static inline void __flush_dcache_page(struct page *page)
{
unsigned long virt = (unsigned long)page_address(page);
cpu_cache_clean_invalidate_range(virt, virt + PAGE_SIZE, 0);
}
static inline void flush_dcache_page(struct page *page)
{
if (page->mapping && !mapping_mapped(page->mapping))
set_bit(PG_dcache_dirty, &page->flags);
else {
unsigned long virt = (unsigned long)page_address(page);
cpu_cache_clean_invalidate_range(virt, virt + PAGE_SIZE, 0);
}
else
__flush_dcache_page(page);
}
#define flush_icache_user_range(vma,page,addr,len) \
flush_dcache_page(page)
/*
* flush_icache_page makes the kernel page address consistent with the
* user space mappings. The functionality is the same as flush_dcache_page,
* except we can do an optimisation and only clean the caches here if
* vma->vm_mm == current->active_mm.
*
* This function is misnamed IMHO. There are three places where it
* is called, each of which is preceded immediately by a call to
* flush_page_to_ram:
* We don't appear to need to do anything here. In fact, if we did, we'd
* duplicate cache flushing elsewhere performed by flush_dcache_page().
*/
#ifdef flush_page_to_ram_ok
static inline void flush_icache_page(struct vm_area_struct *vma, struct page *page)
{
if (page->mapping && !mapping_mapped(page->mapping))
set_bit(PG_dcache_dirty, &page->flags);
else if (vma->vm_mm == current->active_mm) {
unsigned long virt = (unsigned long)page_address(page);
cpu_cache_clean_invalidate_range(virt, virt + PAGE_SIZE, 0);
}
}
#else
#define flush_icache_page(vma,pg) do { } while (0)
#endif
#define flush_icache_page(vma,page) do { } while (0)
#define clean_dcache_entry(_s) cpu_dcache_clean_entry((unsigned long)(_s))
......
......@@ -38,13 +38,13 @@
: : "r" (x)); \
} while (0)
#define modify_domain(dom,type) \
do { \
unsigned int domain = current->thread.domain; \
domain &= ~domain_val(dom, DOMAIN_MANAGER); \
domain |= domain_val(dom, type); \
current->thread.domain = domain; \
set_domain(current->thread.domain); \
#define modify_domain(dom,type) \
do { \
struct thread_info *thread = current_thread_info(); \
unsigned int domain = thread->cpu_domain; \
domain &= ~domain_val(dom, DOMAIN_MANAGER); \
thread->cpu_domain = domain | domain_val(dom, type); \
set_domain(thread->cpu_domain); \
} while (0)
#endif
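A short illustration of the rewritten macro's intended use; DOMAIN_USER, DOMAIN_MANAGER and DOMAIN_CLIENT are the constants defined alongside it, and the surrounding access is hypothetical:

	/* Grant manager access to the user domain so the following access
	 * ignores the page-level protection bits, then return to normal
	 * client (permission-checked) access. */
	modify_domain(DOMAIN_USER, DOMAIN_MANAGER);
	/* ... access to a protected user mapping ... */
	modify_domain(DOMAIN_USER, DOMAIN_CLIENT);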
......@@ -107,7 +107,8 @@ typedef struct siginfo {
#define SI_MESGQ -3 /* sent by real time mesq state change */
#define SI_ASYNCIO -4 /* sent by AIO completion */
#define SI_SIGIO -5 /* sent by queued SIGIO */
#define SI_TKILL -6 /* sent by tkill system call */
#define SI_TKILL -6 /* sent by tkill system call */
#define SI_DETHREAD -7 /* sent by execve() killing subsidiary threads */
#define SI_FROMUSER(siptr) ((siptr)->si_code <= 0)
#define SI_FROMKERNEL(siptr) ((siptr)->si_code > 0)
......
......@@ -84,7 +84,6 @@ extern struct task_struct *__switch_to(struct thread_info *, struct thread_info
#define stf() __stf()
#define save_flags(x) __save_flags(x)
#define restore_flags(x) __restore_flags(x)
#define save_flags_cli(x) __save_flags_cli(x)
#endif /* CONFIG_SMP */
......