Commit 2b8f2f97 authored by David Mosberger

ia64: Make v2.5.8-pre3 work on ia64.

parent 40574c27
......@@ -47,6 +47,8 @@ static void elf32_set_personality (void);
#define ELF_PLAT_INIT(_r) ia64_elf32_init(_r)
#define setup_arg_pages(bprm) ia32_setup_arg_pages(bprm)
#define elf_map elf32_map
+#undef SET_PERSONALITY
+#define SET_PERSONALITY(ex, ibcs2) elf32_set_personality()
/* Ugly but avoids duplication */
......
......@@ -55,7 +55,7 @@ struct illegal_op_return
ia64_emulate_brl (struct pt_regs *regs, unsigned long ar_ec)
{
unsigned long bundle[2];
-unsigned long opcode, btype, qp, offset;
+unsigned long opcode, btype, qp, offset, cpl;
unsigned long next_ip;
struct siginfo siginfo;
struct illegal_op_return rv;
......@@ -158,9 +158,9 @@ ia64_emulate_brl (struct pt_regs *regs, unsigned long ar_ec)
* AR[PFS].pec = AR[EC]
* AR[PFS].ppl = PSR.cpl
*/
+cpl = ia64_psr(regs)->cpl;
regs->ar_pfs = ((regs->cr_ifs & 0x3fffffffff)
-| (ar_ec << 52)
-| ((unsigned long) ia64_psr(regs)->cpl << 62));
+| (ar_ec << 52) | (cpl << 62));
/*
* CFM.sof -= CFM.sol
......
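For reference, the packing ia64_emulate_brl() performs in this hunk can be checked in isolation: AR[PFS] keeps the 38-bit frame marker in bits 0-37, the epilogue count at bit 52, and the privilege level at bit 62, exactly as the expression above encodes them. A minimal user-space sketch (pack_ar_pfs() is an illustrative name, not a kernel symbol):

/* Standalone model of the AR.PFS packing done by ia64_emulate_brl() above.
 * Bit positions come straight from the diff: the 38-bit frame marker in
 * bits 0-37, the epilogue count at bit 52, the privilege level at bit 62.
 * pack_ar_pfs() is a hypothetical helper, not a kernel function.
 */
#include <stdio.h>

static unsigned long
pack_ar_pfs (unsigned long cfm, unsigned long ec, unsigned long cpl)
{
	return (cfm & 0x3fffffffffUL)	/* AR[PFS].pfm = CFM (38 bits) */
		| (ec << 52)		/* AR[PFS].pec = AR[EC] */
		| (cpl << 62);		/* AR[PFS].ppl = PSR.cpl */
}

int
main (void)
{
	/* e.g. a frame of 96 registers, epilogue count 2, user privilege 3: */
	printf("ar.pfs = %#lx\n", pack_ar_pfs(0x60, 2, 3));
	return 0;
}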
......@@ -36,6 +36,7 @@
#include <asm/kregs.h>
#include <asm/offsets.h>
#include <asm/pgtable.h>
+#include <asm/percpu.h>
#include <asm/processor.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
......@@ -1208,8 +1209,8 @@ sys_call_table:
data8 sys_fremovexattr
data8 sys_tkill
data8 sys_futex // 1230
-data8 ia64_ni_syscall
-data8 ia64_ni_syscall
+data8 sys_sched_setaffinity
+data8 sys_sched_getaffinity
data8 ia64_ni_syscall
data8 ia64_ni_syscall
data8 ia64_ni_syscall // 1235
......
......@@ -67,7 +67,7 @@ EXPORT_SYMBOL(kernel_thread);
EXPORT_SYMBOL(last_cli_ip);
#endif
-#include <asm/pgalloc.h>
+#include <asm/tlbflush.h>
EXPORT_SYMBOL(flush_tlb_range);
......
......@@ -353,7 +353,9 @@ static int
verify_guid (efi_guid_t *test, efi_guid_t *target)
{
int rc;
+#ifdef IA64_MCA_DEBUG_INFO
char out[40];
+#endif
if ((rc = efi_guidcmp(*test, *target))) {
IA64_MCA_DEBUG(KERN_DEBUG
......
......@@ -273,9 +273,7 @@ pcibios_enable_device (struct pci_dev *dev)
/* Not needed, since we enable all devices at startup. */
-printk(KERN_INFO "PCI: Found IRQ %d for device %s\n", dev->irq,
-       dev->slot_name);
+printk(KERN_INFO "PCI: Found IRQ %d for device %s\n", dev->irq, dev->slot_name);
return 0;
}
......
......@@ -24,7 +24,7 @@
#include <asm/efi.h>
#include <asm/elf.h>
#include <asm/perfmon.h>
-#include <asm/pgtable.h>
+#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/sal.h>
#include <asm/uaccess.h>
......@@ -145,7 +145,7 @@ do_notify_resume_user (sigset_t *oldset, struct sigscratch *scr, long in_syscall
/*
* We use this if we don't have any better idle routine..
*/
-static void
+void
default_idle (void)
{
/* may want to do PAL_LIGHT_HALT here... */
......@@ -660,7 +660,7 @@ dup_task_struct(struct task_struct *orig)
{
struct task_struct *tsk;
-tsk = __get_free_pages(GFP_KERNEL, KERNEL_STACK_SIZE_ORDER);
+tsk = (void *) __get_free_pages(GFP_KERNEL, KERNEL_STACK_SIZE_ORDER);
if (!tsk)
return NULL;
......
/*
* IA-64 semaphore implementation (derived from x86 version).
*
-* Copyright (C) 1999-2000 Hewlett-Packard Co
-* Copyright (C) 1999-2000 David Mosberger-Tang <davidm@hpl.hp.com>
+* Copyright (C) 1999-2000, 2002 Hewlett-Packard Co
+* David Mosberger-Tang <davidm@hpl.hp.com>
*/
/*
......@@ -25,6 +25,7 @@
*/
#include <linux/sched.h>
#include <asm/errno.h>
+#include <asm/semaphore.h>
/*
......
......@@ -54,7 +54,10 @@
extern char _end;
+#ifdef CONFIG_SMP
+unsigned long __per_cpu_offset[NR_CPUS];
+#endif
struct cpuinfo_ia64 cpu_info __per_cpu_data;
unsigned long ia64_phys_stacked_size_p8;
......@@ -527,20 +530,25 @@ setup_per_cpu_areas (void)
void
cpu_init (void)
{
-extern char __per_cpu_start[], __phys_per_cpu_start[], __per_cpu_end[];
+extern char __per_cpu_start[], __phys_per_cpu_start[];
extern void __init ia64_mmu_init (void *);
unsigned long num_phys_stacked;
pal_vm_info_2_u_t vmi;
unsigned int max_ctx;
struct cpuinfo_ia64 *my_cpu_info;
void *my_cpu_data;
+#ifdef CONFIG_SMP
+extern char __per_cpu_end[];
int cpu = smp_processor_id();
my_cpu_data = alloc_bootmem_pages(__per_cpu_end - __per_cpu_start);
memcpy(my_cpu_data, __phys_per_cpu_start, __per_cpu_end - __per_cpu_start);
__per_cpu_offset[cpu] = (char *) my_cpu_data - __per_cpu_start;
-my_cpu_info = my_cpu_data + ((char *) &cpu_info - __per_cpu_start);
+#else
+my_cpu_data = __phys_per_cpu_start;
+#endif
+my_cpu_info = my_cpu_data + ((char *) &cpu_info - __per_cpu_start);
/*
......
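The cpu_init() change above is the heart of the new per-CPU scheme: each CPU gets its own copy of the per-CPU data area, and __per_cpu_offset[] records how far that copy sits from the linked-in template, so per_cpu() reaches CPU-local storage with a single addition. A rough user-space model of the idea (NCPUS, the template variable, and all names here are illustrative, not the kernel API):

/* User-space model of offset-based per-CPU data, mirroring cpu_init()
 * above: copy a template area once per "CPU" and record how far each
 * copy sits from the original, so &var + offset[cpu] finds CPU-local
 * storage.  The out-of-object pointer arithmetic is the same trick the
 * kernel plays with __per_cpu_start.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NCPUS 4

static long counter_template;		/* stands in for a __per_cpu_data variable */
static char *area_start = (char *) &counter_template;
static long per_cpu_offset[NCPUS];	/* stands in for __per_cpu_offset[] */

#define per_cpu(var, cpu) \
	(*(__typeof__(&(var))) ((char *) &(var) + per_cpu_offset[cpu]))

int
main (void)
{
	for (int cpu = 0; cpu < NCPUS; ++cpu) {
		char *copy = malloc(sizeof counter_template);	/* like alloc_bootmem_pages() */
		memcpy(copy, area_start, sizeof counter_template);
		per_cpu_offset[cpu] = copy - area_start;
	}
	per_cpu(counter_template, 2) = 42;	/* touches CPU 2's copy only */
	printf("cpu0=%ld cpu2=%ld\n",
	       per_cpu(counter_template, 0), per_cpu(counter_template, 2));
	return 0;
}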
......@@ -47,7 +47,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
/* At this point: (!vmm || addr < vmm->vm_end). */
if (TASK_SIZE - len < addr)
return -ENOMEM;
-if (rgn_offset(addr) + len > RGN_MAP_LIMIT) /* no risk of overflow here... */
+if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT) /* no risk of overflow here... */
return -ENOMEM;
if (!vmm || addr + len <= vmm->vm_start)
return addr;
......@@ -126,7 +126,7 @@ ia64_brk (unsigned long brk, long arg1, long arg2, long arg3,
}
/* Check against unimplemented/unmapped addresses: */
-if ((newbrk - oldbrk) > RGN_MAP_LIMIT || rgn_offset(newbrk) > RGN_MAP_LIMIT)
+if ((newbrk - oldbrk) > RGN_MAP_LIMIT || REGION_OFFSET(newbrk) > RGN_MAP_LIMIT)
goto out;
/* Check against rlimit.. */
......@@ -206,7 +206,7 @@ do_mmap2 (unsigned long addr, unsigned long len, int prot, int flags, int fd, un
* or across a region boundary. Note: RGN_MAP_LIMIT is equal to 2^n-PAGE_SIZE
* (for some integer n <= 61) and len > 0.
*/
-roff = rgn_offset(addr);
+roff = REGION_OFFSET(addr);
if ((len > RGN_MAP_LIMIT) || (roff > (RGN_MAP_LIMIT - len))) {
addr = -EINVAL;
goto out;
......
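The hunks above only switch sys_ia64.c to the REGION_NUMBER()/REGION_OFFSET() spellings; the underlying split is unchanged: an ia64 virtual address carries a 3-bit region number in bits 61-63 and a 61-bit offset below it. A self-contained sketch of that split (the macro bodies below are written from the 3/61 split described here, not copied from the patched headers):

/* The ia64 region split these macros expose: the top three bits of a
 * virtual address select one of eight regions, the remaining 61 bits
 * are the offset inside the region.
 */
#include <stdio.h>

#define REGION_NUMBER(addr)	(((unsigned long) (addr)) >> 61)
#define REGION_OFFSET(addr)	(((unsigned long) (addr)) & ((1UL << 61) - 1))

int
main (void)
{
	unsigned long addr = 0x2000000000054000UL;	/* a typical region-1 user address */
	printf("region %lu, offset %#lx\n", REGION_NUMBER(addr), REGION_OFFSET(addr));
	return 0;
}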
......@@ -120,15 +120,15 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
if (!(prev_vma && (prev_vma->vm_flags & VM_GROWSUP) && (address == prev_vma->vm_end))) {
if (!(vma->vm_flags & VM_GROWSDOWN))
goto bad_area;
-if (rgn_index(address) != rgn_index(vma->vm_start)
-    || rgn_offset(address) >= RGN_MAP_LIMIT)
+if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
+    || REGION_OFFSET(address) >= RGN_MAP_LIMIT)
goto bad_area;
if (expand_stack(vma, address))
goto bad_area;
} else {
vma = prev_vma;
-if (rgn_index(address) != rgn_index(vma->vm_start)
-    || rgn_offset(address) >= RGN_MAP_LIMIT)
+if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
+    || REGION_OFFSET(address) >= RGN_MAP_LIMIT)
goto bad_area;
if (expand_backing_store(vma, address))
goto bad_area;
......
......@@ -40,10 +40,10 @@ static unsigned long totalram_pages;
static int pgt_cache_water[2] = { 25, 50 };
-int
+void
check_pgt_cache (void)
{
-int low, high, freed = 0;
+int low, high;
low = pgt_cache_water[0];
high = pgt_cache_water[1];
......@@ -51,12 +51,11 @@ check_pgt_cache (void)
if (pgtable_cache_size > high) {
do {
if (pgd_quicklist)
-free_page((unsigned long)pgd_alloc_one_fast(0)), ++freed;
+free_page((unsigned long)pgd_alloc_one_fast(0));
if (pmd_quicklist)
-free_page((unsigned long)pmd_alloc_one_fast(0, 0)), ++freed;
+free_page((unsigned long)pmd_alloc_one_fast(0, 0));
} while (pgtable_cache_size > low);
}
-return freed;
}
/*
......
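check_pgt_cache() now returns void and drops the unused freed counter, but the trimming policy is untouched: do nothing until the quicklists exceed the high watermark, then drain them down to the low one. A toy model of that hysteresis (the integer cache_size stands in for pgtable_cache_size and the quicklists):

/* Toy model of the high/low watermark trimming in check_pgt_cache():
 * nothing happens until the cache exceeds the high mark, then it is
 * drained all the way down to the low mark.
 */
#include <stdio.h>

static int pgt_cache_water[2] = { 25, 50 };
static int cache_size = 60;		/* pretend 60 pages are cached */

static void
trim_cache (void)
{
	int low = pgt_cache_water[0], high = pgt_cache_water[1];

	if (cache_size > high) {
		do {
			--cache_size;	/* stands in for free_page(...) */
		} while (cache_size > low);
	}
}

int
main (void)
{
	trim_cache();
	printf("cache trimmed to %d pages\n", cache_size);	/* prints 25 */
	return 0;
}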
......@@ -16,10 +16,11 @@
#include <linux/smp.h>
#include <linux/mm.h>
+#include <asm/delay.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/pal.h>
-#include <asm/delay.h>
+#include <asm/tlbflush.h>
#define SUPPORTED_PGBITS ( \
1 << _PAGE_SIZE_256M | \
......
......@@ -6,8 +6,6 @@
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
#include <linux/mm.h>
-#include <asm/bitops.h>
-#include <asm/page.h>
......@@ -32,7 +30,7 @@ extern void flush_icache_range (unsigned long start, unsigned long end);
#define flush_icache_user_range(vma, page, user_addr, len) \
do { \
-unsigned long _addr = page_address(page) + ((user_addr) & ~PAGE_MASK); \
+unsigned long _addr = (unsigned long) page_address(page) + ((user_addr) & ~PAGE_MASK); \
flush_icache_range(_addr, _addr + (len)); \
} while (0)
......
......@@ -150,6 +150,31 @@ pte_free_kernel (pte_t *pte)
free_page((unsigned long) pte);
}
-extern int do_check_pgt_cache (int, int);
+extern void check_pgt_cache (void);
+/*
+* IA-64 doesn't have any external MMU info: the page tables contain all the necessary
+* information. However, we use this macro to take care of any (delayed) i-cache flushing
+* that may be necessary.
+*/
+static inline void
+update_mmu_cache (struct vm_area_struct *vma, unsigned long vaddr, pte_t pte)
+{
+unsigned long addr;
+struct page *page;
+if (!pte_exec(pte))
+return; /* not an executable page... */
+page = pte_page(pte);
+/* don't use VADDR: it may not be mapped on this CPU (or may have just been flushed): */
+addr = (unsigned long) page_address(page);
+if (test_bit(PG_arch_1, &page->flags))
+return; /* i-cache is already coherent with d-cache */
+flush_icache_range(addr, addr + PAGE_SIZE);
+set_bit(PG_arch_1, &page->flags); /* mark page as clean */
+}
#endif /* _ASM_IA64_PGALLOC_H */
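update_mmu_cache() above (moved here from pgtable.h; the matching removal appears further down) makes i-cache flushing lazy: PG_arch_1 records that the i-cache already agrees with the page, so only the first executable mapping after a modification pays for a flush. A small stand-alone model of that protocol (types and names are illustrative):

/* Model of the PG_arch_1 protocol used by update_mmu_cache(): the bit
 * records that the i-cache already matches the page, so repeated
 * executable mappings of the same page skip the expensive flush.
 */
#include <stdbool.h>
#include <stdio.h>

struct page { bool arch_1; };		/* stands in for PG_arch_1 in page->flags */

static int flushes;

static void
map_executable (struct page *page)
{
	if (page->arch_1)
		return;			/* i-cache already coherent with d-cache */
	++flushes;			/* stands in for flush_icache_range() */
	page->arch_1 = true;		/* mark page as clean */
}

static void
modify_page (struct page *page)
{
	page->arch_1 = false;		/* contents changed: next exec mapping must flush */
}

int
main (void)
{
	struct page p = { false };
	map_executable(&p);		/* flushes */
	map_executable(&p);		/* skipped: still coherent */
	modify_page(&p);
	map_executable(&p);		/* flushes again */
	printf("flushes = %d\n", flushes);	/* prints 2 */
	return 0;
}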
......@@ -14,7 +14,6 @@
#include <linux/config.h>
+#include <asm/cacheflush.h>
#include <asm/mman.h>
-#include <asm/page.h>
#include <asm/processor.h>
......@@ -122,6 +121,7 @@
# ifndef __ASSEMBLY__
#include <asm/bitops.h>
#include <asm/cacheflush.h>
+#include <asm/mmu_context.h>
#include <asm/processor.h>
......@@ -406,31 +406,6 @@ pte_same (pte_t a, pte_t b)
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern void paging_init (void);
-/*
-* IA-64 doesn't have any external MMU info: the page tables contain all the necessary
-* information. However, we use this macro to take care of any (delayed) i-cache flushing
-* that may be necessary.
-*/
-static inline void
-update_mmu_cache (struct vm_area_struct *vma, unsigned long vaddr, pte_t pte)
-{
-unsigned long addr;
-struct page *page;
-if (!pte_exec(pte))
-return; /* not an executable page... */
-page = pte_page(pte);
-/* don't use VADDR: it may not be mapped on this CPU (or may have just been flushed): */
-addr = (unsigned long) page_address(page);
-if (test_bit(PG_arch_1, &page->flags))
-return; /* i-cache is already coherent with d-cache */
-flush_icache_range(addr, addr + PAGE_SIZE);
-set_bit(PG_arch_1, &page->flags); /* mark page as clean */
-}
#define SWP_TYPE(entry) (((entry).val >> 1) & 0xff)
#define SWP_OFFSET(entry) (((entry).val << 1) >> 10)
#define SWP_ENTRY(type,offset) ((swp_entry_t) { ((type) << 1) | ((long) (offset) << 9) })
......
......@@ -15,7 +15,7 @@
#include <linux/config.h>
#include <linux/compiler.h>
+#include <linux/percpu.h>
#include <asm/ptrace.h>
#include <asm/kregs.h>
......@@ -186,10 +186,6 @@
*/
#define IA64_USEC_PER_CYC_SHIFT 41
-#define __HAVE_ARCH_PER_CPU
-#define THIS_CPU(var) (var)
#ifndef __ASSEMBLY__
#include <linux/threads.h>
......@@ -202,11 +198,6 @@
#include <asm/unwind.h>
#include <asm/atomic.h>
-extern unsigned long __per_cpu_offset[NR_CPUS];
-#define per_cpu(var, cpu) (*(__typeof__(&(var))) ((void *) &(var) + __per_cpu_offset[cpu]))
-#define this_cpu(var) (var)
/* like above but expressed as bitfields for more efficient access: */
struct ia64_psr {
__u64 reserved0 : 1;
......
......@@ -15,6 +15,8 @@
#define TI_ADDR_LIMIT 0x10
#define TI_PRE_COUNT 0x18
+#define PREEMPT_ACTIVE 0x4000000
#ifndef __ASSEMBLY__
/*
......
......@@ -57,7 +57,8 @@
* point inside the virtually mapped linear page table.
*/
#define __access_ok(addr,size,segment) (((unsigned long) (addr)) <= (segment).seg \
-&& ((segment).seg == KERNEL_DS.seg || rgn_offset((unsigned long) (addr)) < RGN_MAP_LIMIT))
+&& ((segment).seg == KERNEL_DS.seg \
+|| REGION_OFFSET((unsigned long) (addr)) < RGN_MAP_LIMIT))
#define access_ok(type,addr,size) __access_ok((addr),(size),get_fs())
static inline int
......
......@@ -220,6 +220,8 @@
#define __NR_fremovexattr 1228
#define __NR_tkill 1229
#define __NR_futex 1230
+#define __NR_sched_setaffinity 1231
+#define __NR_sched_getaffinity 1232
#if !defined(__ASSEMBLY__) && !defined(ASSEMBLER)
......
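With the table slots wired up earlier and the numbers published here, the new calls are reachable before libc grows wrappers, e.g. through syscall(2). A sketch under the assumption that the early 2.5 interface takes (pid, len, unsigned long *mask); this is illustrative, not a portable interface:

/* Invoking the freshly-added affinity syscalls by number, for a libc
 * that doesn't wrap them yet.  The ia64 numbers below come from the
 * unistd.h hunk above; on other architectures the native definitions
 * are used instead.
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>	/* syscall() */

#ifndef __NR_sched_getaffinity
# define __NR_sched_setaffinity	1231
# define __NR_sched_getaffinity	1232
#endif

int
main (void)
{
	unsigned long mask = 0;

	if (syscall(__NR_sched_getaffinity, 0 /* self */, sizeof mask, &mask) < 0)
		perror("sched_getaffinity");
	else
		printf("affinity mask: %#lx\n", mask);
	return 0;
}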
......@@ -11,6 +11,8 @@
extern void setup_serial_acpi(void *);
#define ACPI_SIG_LEN 4
/* ACPI table signatures */
+#define ACPI_SPCRT_SIGNATURE "SPCR"
+#define ACPI_DBGPT_SIGNATURE "DBGP"
......