Commit 5d01e6ce authored by Mikael Starvik, committed by Linus Torvalds

[PATCH] CRIS update: updates for 2.6.12

Patches to make CRIS work with 2.6.12.
Signed-off-by: Mikael Starvik <starvik@axis.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent dcf1310b
@@ -11,6 +11,7 @@
 #include <linux/ptrace.h>
 #include <linux/user.h>
 #include <linux/signal.h>
+#include <linux/security.h>
 #include <asm/uaccess.h>
 #include <asm/page.h>
@@ -86,9 +87,13 @@ sys_ptrace(long request, long pid, long addr, long data)
 	ret = -EPERM;
 
 	if (request == PTRACE_TRACEME) {
+		/* are we already being traced? */
 		if (current->ptrace & PT_PTRACED)
 			goto out;
+		ret = security_ptrace(current->parent, current);
+		if (ret)
+			goto out;
+		/* set the ptrace bit in the process flags. */
 		current->ptrace |= PT_PTRACED;
 		ret = 0;
 		goto out;
@@ -207,7 +212,7 @@ sys_ptrace(long request, long pid, long addr, long data)
 	case PTRACE_KILL:
 		ret = 0;
-		if (child->state == TASK_ZOMBIE)
+		if (child->exit_state == EXIT_ZOMBIE)
 			break;
 		child->exit_code = SIGKILL;
...
@@ -32,7 +32,7 @@ void *module_alloc(unsigned long size)
 {
 	if (size == 0)
 		return NULL;
-	return vmalloc(size);
+	return vmalloc_exec(size);
 }
@@ -59,26 +59,8 @@ int apply_relocate(Elf32_Shdr *sechdrs,
 		   unsigned int relsec,
 		   struct module *me)
 {
-	unsigned int i;
-	Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
-	Elf32_Sym *sym;
-	uint32_t *location;
-	DEBUGP("Applying relocate section %u to %u\n", relsec,
-	       sechdrs[relsec].sh_info);
-	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
-		/* This is where to make the change */
-		location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_offset
-			+ rel[i].r_offset;
-		/* This is the symbol it is referring to. Note that all
-		   undefined symbols have been resolved. */
-		sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
-			+ ELF32_R_SYM(rel[i].r_info);
-		/* We add the value into the location given */
-		*location += sym->st_value;
-	}
-	return 0;
+	printk(KERN_ERR "module %s: REL relocation unsupported\n", me->name);
+	return -ENOEXEC;
 }
@@ -90,7 +72,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
 	unsigned int i;
 	Elf32_Rela *rela = (void *)sechdrs[relsec].sh_addr;
-	DEBUGP ("Applying relocate section %u to %u\n", relsec,
+	DEBUGP ("Applying add relocate section %u to %u\n", relsec,
 		sechdrs[relsec].sh_info);
 	for (i = 0; i < sechdrs[relsec].sh_size / sizeof (*rela); i++) {
@@ -103,7 +85,18 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
 		Elf32_Sym *sym
 			= ((Elf32_Sym *)sechdrs[symindex].sh_addr
 			   + ELF32_R_SYM (rela[i].r_info));
+		switch (ELF32_R_TYPE(rela[i].r_info)) {
+		case R_CRIS_32:
 			*loc = sym->st_value + rela[i].r_addend;
+			break;
+		case R_CRIS_32_PCREL:
+			*loc = sym->st_value - (unsigned)loc + rela[i].r_addend - 4;
+			break;
+		default:
+			printk(KERN_ERR "module %s: Unknown relocation: %u\n",
+			       me->name, ELF32_R_TYPE(rela[i].r_info));
+			return -ENOEXEC;
+		}
 	}
 	return 0;
...
-/* $Id: process.c,v 1.17 2004/04/05 13:53:48 starvik Exp $
+/* $Id: process.c,v 1.21 2005/03/04 08:16:17 starvik Exp $
  *
  * linux/arch/cris/kernel/process.c
  *
@@ -8,6 +8,18 @@
  * Authors: Bjorn Wesen (bjornw@axis.com)
  *
  * $Log: process.c,v $
+ * Revision 1.21 2005/03/04 08:16:17 starvik
+ * Merge of Linux 2.6.11.
+ *
+ * Revision 1.20 2005/01/18 05:57:22 starvik
+ * Renamed hlt_counter to cris_hlt_counter and made it global.
+ *
+ * Revision 1.19 2004/10/19 13:07:43 starvik
+ * Merge of Linux 2.6.9
+ *
+ * Revision 1.18 2004/08/16 12:37:23 starvik
+ * Merge of Linux 2.6.8
+ *
  * Revision 1.17 2004/04/05 13:53:48 starvik
  * Merge of Linux 2.6.5
  *
@@ -161,18 +173,18 @@ EXPORT_SYMBOL(init_task);
  * region by enable_hlt/disable_hlt.
  */
-static int hlt_counter=0;
+int cris_hlt_counter=0;
 
 void disable_hlt(void)
 {
-	hlt_counter++;
+	cris_hlt_counter++;
 }
 
 EXPORT_SYMBOL(disable_hlt);
 
 void enable_hlt(void)
 {
-	hlt_counter--;
+	cris_hlt_counter--;
 }
 
 EXPORT_SYMBOL(enable_hlt);
@@ -195,16 +207,19 @@ void cpu_idle (void)
 	/* endless idle loop with no priority at all */
 	while (1) {
 		while (!need_resched()) {
-			void (*idle)(void) = pm_idle;
+			void (*idle)(void);
+			/*
+			 * Mark this as an RCU critical section so that
+			 * synchronize_kernel() in the unload path waits
+			 * for our completion.
+			 */
+			idle = pm_idle;
 			if (!idle)
 				idle = default_idle;
 			idle();
 		}
 		schedule();
 	}
 }
 
 void hard_reset_now (void);
...
@@ -51,7 +51,7 @@ extern inline unsigned long ffz(unsigned long w)
  *
  * Undefined if no bit exists, so code should check against 0 first.
  */
-extern __inline__ unsigned long __ffs(unsigned long word)
+extern inline unsigned long __ffs(unsigned long word)
 {
 	return cris_swapnwbrlz(~word);
 }
...
@@ -25,7 +25,7 @@
 #define THREAD_usp 4 /* offsetof(struct thread_struct, usp) */
 #define THREAD_dccr 8 /* offsetof(struct thread_struct, dccr) */
-#define TASK_pid 133 /* offsetof(struct task_struct, pid) */
+#define TASK_pid 141 /* offsetof(struct task_struct, pid) */
 #define LCLONE_VM 256 /* CLONE_VM */
 #define LCLONE_UNTRACED 8388608 /* CLONE_UNTRACED */
...
@@ -16,6 +16,7 @@
 #include <asm/arch/bitops.h>
 #include <asm/system.h>
+#include <asm/atomic.h>
 #include <linux/compiler.h>
 
 /*
@@ -88,7 +89,7 @@ struct __dummy { unsigned long a[100]; };
  * It also implies a memory barrier.
  */
-extern inline int test_and_set_bit(int nr, void *addr)
+extern inline int test_and_set_bit(int nr, volatile unsigned long *addr)
 {
 	unsigned int mask, retval;
 	unsigned long flags;
@@ -96,15 +97,15 @@ extern inline int test_and_set_bit(int nr, void *addr)
 	adr += nr >> 5;
 	mask = 1 << (nr & 0x1f);
-	local_save_flags(flags);
-	local_irq_disable();
+	cris_atomic_save(addr, flags);
 	retval = (mask & *adr) != 0;
 	*adr |= mask;
+	cris_atomic_restore(addr, flags);
 	local_irq_restore(flags);
 	return retval;
 }
 
-extern inline int __test_and_set_bit(int nr, void *addr)
+extern inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
 {
 	unsigned int mask, retval;
 	unsigned int *adr = (unsigned int *)addr;
@@ -131,7 +132,7 @@ extern inline int __test_and_set_bit(int nr, void *addr)
  * It also implies a memory barrier.
  */
-extern inline int test_and_clear_bit(int nr, void *addr)
+extern inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
 {
 	unsigned int mask, retval;
 	unsigned long flags;
@@ -139,11 +140,10 @@ extern inline int test_and_clear_bit(int nr, void *addr)
 	adr += nr >> 5;
 	mask = 1 << (nr & 0x1f);
-	local_save_flags(flags);
-	local_irq_disable();
+	cris_atomic_save(addr, flags);
 	retval = (mask & *adr) != 0;
 	*adr &= ~mask;
-	local_irq_restore(flags);
+	cris_atomic_restore(addr, flags);
 	return retval;
 }
@@ -157,7 +157,7 @@ extern inline int test_and_clear_bit(int nr, void *addr)
  * but actually fail. You must protect multiple accesses with a lock.
  */
-extern inline int __test_and_clear_bit(int nr, void *addr)
+extern inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
 {
 	unsigned int mask, retval;
 	unsigned int *adr = (unsigned int *)addr;
@@ -177,24 +177,23 @@ extern inline int __test_and_clear_bit(int nr, void *addr)
  * It also implies a memory barrier.
  */
-extern inline int test_and_change_bit(int nr, void *addr)
+extern inline int test_and_change_bit(int nr, volatile unsigned long *addr)
 {
 	unsigned int mask, retval;
 	unsigned long flags;
 	unsigned int *adr = (unsigned int *)addr;
 	adr += nr >> 5;
 	mask = 1 << (nr & 0x1f);
-	local_save_flags(flags);
-	local_irq_disable();
+	cris_atomic_save(addr, flags);
 	retval = (mask & *adr) != 0;
 	*adr ^= mask;
-	local_irq_restore(flags);
+	cris_atomic_restore(addr, flags);
 	return retval;
 }
 
 /* WARNING: non atomic and it can be reordered! */
-extern inline int __test_and_change_bit(int nr, void *addr)
+extern inline int __test_and_change_bit(int nr, volatile unsigned long *addr)
 {
 	unsigned int mask, retval;
 	unsigned int *adr = (unsigned int *)addr;
@@ -215,7 +214,7 @@ extern inline int __test_and_change_bit(int nr, void *addr)
  * This routine doesn't need to be atomic.
  */
-extern inline int test_bit(int nr, const void *addr)
+extern inline int test_bit(int nr, const volatile unsigned long *addr)
 {
 	unsigned int mask;
 	unsigned int *adr = (unsigned int *)addr;
@@ -259,7 +258,7 @@ extern inline int test_bit(int nr, const void *addr)
  * @offset: The bitnumber to start searching at
 * @size: The maximum size to search
 */
-extern inline int find_next_zero_bit (void * addr, int size, int offset)
+extern inline int find_next_zero_bit (const unsigned long * addr, int size, int offset)
 {
 	unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
 	unsigned long result = offset & ~31UL;
@@ -301,7 +300,7 @@ extern inline int find_next_zero_bit (void * addr, int size, int offset)
 * @offset: The bitnumber to start searching at
 * @size: The maximum size to search
 */
-static __inline__ int find_next_bit(void *addr, int size, int offset)
+static __inline__ int find_next_bit(const unsigned long *addr, int size, int offset)
 {
 	unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
 	unsigned long result = offset & ~31UL;
@@ -367,7 +366,7 @@ static __inline__ int find_next_bit(void *addr, int size, int offset)
 #define minix_test_bit(nr,addr) test_bit(nr,addr)
 #define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)
 
-extern inline int sched_find_first_bit(unsigned long *b)
+extern inline int sched_find_first_bit(const unsigned long *b)
 {
 	if (unlikely(b[0]))
 		return __ffs(b[0]);
...
@@ -17,8 +17,8 @@ enum km_type {
 	KM_PTE1,
 	KM_IRQ0,
 	KM_IRQ1,
-	KM_CRYPTO_USER,
-	KM_CRYPTO_SOFTIRQ,
+	KM_SOFTIRQ0,
+	KM_SOFTIRQ1,
 	KM_TYPE_NR
 };
...
@@ -29,18 +29,15 @@
  */
 #ifndef __ASSEMBLY__
 typedef struct { unsigned long pte; } pte_t;
-typedef struct { unsigned long pmd; } pmd_t;
 typedef struct { unsigned long pgd; } pgd_t;
 typedef struct { unsigned long pgprot; } pgprot_t;
 #endif
 
 #define pte_val(x) ((x).pte)
-#define pmd_val(x) ((x).pmd)
 #define pgd_val(x) ((x).pgd)
 #define pgprot_val(x) ((x).pgprot)
 
 #define __pte(x) ((pte_t) { (x) } )
-#define __pmd(x) ((pmd_t) { (x) } )
 #define __pgd(x) ((pgd_t) { (x) } )
 #define __pgprot(x) ((pgprot_t) { (x) } )
@@ -73,10 +70,6 @@ typedef struct { unsigned long pgprot; } pgprot_t;
 #ifndef __ASSEMBLY__
 
-#define BUG() do { \
-	printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \
-} while (0)
-
 /* Pure 2^n version of get_order */
 static inline int get_order(unsigned long size)
 {
...
@@ -47,16 +47,6 @@ extern inline void pte_free(struct page *pte)
 
 #define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte))
 
-/*
- * We don't have any real pmd's, and this code never triggers because
- * the pgd will always be present..
- */
-#define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
-#define pmd_free(x) do { } while (0)
-#define __pmd_free_tlb(tlb,x) do { } while (0)
-#define pgd_populate(mm, pmd, pte) BUG()
-
 #define check_pgt_cache() do { } while (0)
 
 #endif
@@ -5,7 +5,8 @@
 #ifndef _CRIS_PGTABLE_H
 #define _CRIS_PGTABLE_H
 
-#include <asm-generic/4level-fixup.h>
+#include <asm/page.h>
+#include <asm-generic/pgtable-nopmd.h>
 
 #ifndef __ASSEMBLY__
 #include <linux/config.h>
@@ -41,22 +42,14 @@ extern void paging_init(void);
  * but the define is needed for a generic inline function.)
  */
 #define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval)
-#define set_pgd(pgdptr, pgdval) (*(pgdptr) = pgdval)
+#define set_pgu(pudptr, pudval) (*(pudptr) = pudval)
 
-/* PMD_SHIFT determines the size of the area a second-level page table can
+/* PGDIR_SHIFT determines the size of the area a second-level page table can
  * map. It is equal to the page size times the number of PTE's that fit in
  * a PMD page. A PTE is 4-bytes in CRIS. Hence the following number.
  */
-#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT-2))
-#define PMD_SIZE (1UL << PMD_SHIFT)
-#define PMD_MASK (~(PMD_SIZE-1))
-
-/* PGDIR_SHIFT determines what a third-level page table entry can map.
- * Since we fold into a two-level structure, this is the same as PMD_SHIFT.
- */
-#define PGDIR_SHIFT PMD_SHIFT
+#define PGDIR_SHIFT (PAGE_SHIFT + (PAGE_SHIFT-2))
 #define PGDIR_SIZE (1UL << PGDIR_SHIFT)
 #define PGDIR_MASK (~(PGDIR_SIZE-1))
@@ -67,7 +60,6 @@ extern void paging_init(void);
  * divide it by 4 (shift by 2).
  */
 #define PTRS_PER_PTE (1UL << (PAGE_SHIFT-2))
-#define PTRS_PER_PMD 1
 #define PTRS_PER_PGD (1UL << (PAGE_SHIFT-2))
 
 /* calculate how many PGD entries a user-level program can use
@@ -115,16 +107,6 @@ extern unsigned long empty_zero_page;
 
 #ifndef __ASSEMBLY__
 
-/*
- * The "pgd_xxx()" functions here are trivial for a folded two-level
- * setup: the pgd is never bad, and a pmd always exists (as it's folded
- * into the pgd entry)
- */
-extern inline int pgd_none(pgd_t pgd) { return 0; }
-extern inline int pgd_bad(pgd_t pgd) { return 0; }
-extern inline int pgd_present(pgd_t pgd) { return 1; }
-extern inline void pgd_clear(pgd_t * pgdp) { }
-
 /*
  * The following only work if pte_present() is true.
  * Undefined behaviour if not..
@@ -275,7 +257,7 @@ extern inline void pmd_set(pmd_t * pmdp, pte_t * ptep)
 #define pmd_page_kernel(pmd) ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
 
 /* to find an entry in a page-table-directory. */
-#define pgd_index(address) ((address >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
+#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
 
 /* to find an entry in a page-table-directory */
 extern inline pgd_t * pgd_offset(struct mm_struct * mm, unsigned long address)
@@ -286,12 +268,6 @@ extern inline pgd_t * pgd_offset(struct mm_struct * mm, unsigned long address)
 /* to find an entry in a kernel page-table-directory */
 #define pgd_offset_k(address) pgd_offset(&init_mm, address)
 
-/* Find an entry in the second-level page table.. */
-extern inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
-{
-	return (pmd_t *) dir;
-}
-
 /* Find an entry in the third-level page table.. */
 #define __pte_offset(address) \
 	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
@@ -308,8 +284,6 @@ extern inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
 #define pte_ERROR(e) \
 	printk("%s:%d: bad pte %p(%08lx).\n", __FILE__, __LINE__, &(e), pte_val(e))
-#define pmd_ERROR(e) \
-	printk("%s:%d: bad pmd %p(%08lx).\n", __FILE__, __LINE__, &(e), pmd_val(e))
 #define pgd_ERROR(e) \
 	printk("%s:%d: bad pgd %p(%08lx).\n", __FILE__, __LINE__, &(e), pgd_val(e))
@@ -348,5 +322,7 @@ extern inline void update_mmu_cache(struct vm_area_struct * vma,
 #define pte_to_pgoff(x) (pte_val(x) >> 6)
 #define pgoff_to_pte(x) __pte(((x) << 6) | _PAGE_FILE)
 
+typedef pte_t *pte_addr_t;
+
 #endif /* __ASSEMBLY__ */
 #endif /* _CRIS_PGTABLE_H */
@@ -55,15 +55,6 @@ unsigned long get_wchan(struct task_struct *p);
 #define KSTK_ESP(tsk) ((tsk) == current ? rdusp() : (tsk)->thread.usp)
 
-/*
- * Free current thread data structures etc..
- */
-
-extern inline void exit_thread(void)
-{
-	/* Nothing needs to be done. */
-}
-
 extern unsigned long thread_saved_pc(struct task_struct *tsk);
 
 /* Free all resources held by a thread. */
...
@@ -43,7 +43,7 @@ struct thread_info {
 #endif
 
-#define PREEMPT_ACTIVE 0x4000000
+#define PREEMPT_ACTIVE 0x10000000
 
 /*
  * macros/functions for gaining access to the thread information structure
...
@@ -14,7 +14,7 @@
  * used so it does not matter.
  */
 
-typedef unsigned int cycles_t;
+typedef unsigned long long cycles_t;
 
 extern inline cycles_t get_cycles(void)
 {
...
@@ -52,7 +52,7 @@ typedef unsigned long long u64;
 typedef u32 dma_addr_t;
 typedef u32 dma64_addr_t;
 
-typedef unsigned int kmem_bufctl_t;
+typedef unsigned short kmem_bufctl_t;
 
 #endif /* __ASSEMBLY__ */
...
@@ -288,8 +288,15 @@
 #define __NR_mq_timedreceive (__NR_mq_open+3)
 #define __NR_mq_notify (__NR_mq_open+4)
 #define __NR_mq_getsetattr (__NR_mq_open+5)
+#define __NR_sys_kexec_load 283
+#define __NR_waitid 284
+/* #define __NR_sys_setaltroot 285 */
+#define __NR_add_key 286
+#define __NR_request_key 287
+#define __NR_keyctl 288
 
-#define NR_syscalls 283
+#define NR_syscalls 289
 
 #ifdef __KERNEL__
...