Commit 56372b0b authored by GuanXuetao

unicore32 core architecture: mm related: fault handling

This patch implements fault handling for memory management.
Signed-off-by: Guan Xuetao <gxt@mprc.pku.edu.cn>
Reviewed-by: Arnd Bergmann <arnd@arndb.de>
parent b50f1704
/*
* linux/arch/unicore32/include/asm/mmu.h
*
* Code specific to PKUnity SoC and UniCore ISA
*
* Copyright (C) 2001-2010 GUAN Xue-tao
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __UNICORE_MMU_H__
#define __UNICORE_MMU_H__
typedef unsigned long mm_context_t;
#endif
/*
* linux/arch/unicore32/include/asm/mmu_context.h
*
* Code specific to PKUnity SoC and UniCore ISA
*
* Copyright (C) 2001-2010 GUAN Xue-tao
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __UNICORE_MMU_CONTEXT_H__
#define __UNICORE_MMU_CONTEXT_H__
#include <linux/compiler.h>
#include <linux/sched.h>
#include <linux/io.h>
#include <asm/cacheflush.h>
#include <asm/cpu-single.h>
#define init_new_context(tsk, mm) 0
#define destroy_context(mm) do { } while (0)
/*
* This is called when "tsk" is about to enter lazy TLB mode.
*
* mm: describes the currently active mm context
* tsk: task which is entering lazy tlb
* cpu: cpu number which is entering lazy tlb
*
* tsk->mm will be NULL
*/
static inline void
enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}
/*
* This is the actual mm switch as far as the scheduler
* is concerned. No registers are touched. We avoid
* calling the CPU specific function when the mm hasn't
* actually changed.
*/
static inline void
switch_mm(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk)
{
unsigned int cpu = smp_processor_id();
if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next)
cpu_switch_mm(next->pgd, next);
}
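/*
 * For context, a hedged sketch (not part of this patch) of the call
 * path that reaches switch_mm(): the scheduler's context_switch()
 * either lets a kernel thread borrow the outgoing mm, entering lazy
 * TLB mode, or performs the real switch for a user task. Shape follows
 * kernel/sched.c of this era, simplified; reference counting omitted.
 */
static inline void
context_switch_sketch(struct task_struct *prev, struct task_struct *next)
{
	struct mm_struct *mm = next->mm;
	struct mm_struct *oldmm = prev->active_mm;

	if (!mm) {
		/* kernel thread: borrow the previous mm, go lazy */
		next->active_mm = oldmm;
		enter_lazy_tlb(oldmm, next);
	} else
		switch_mm(oldmm, mm, next);
}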
#define deactivate_mm(tsk, mm) do { } while (0)
#define activate_mm(prev, next) switch_mm(prev, next, NULL)
/*
* We are inserting a "fake" vma for the user-accessible vector page so
* gdb and friends can get to it through ptrace and /proc/<pid>/mem.
* But we also want to remove it before the generic code gets to see it
* during process exit, otherwise unmapping it would cause total havoc.
* (a macro is used here because remove_vma() is static to mm/mmap.c)
*/
#define arch_exit_mmap(mm) \
do { \
struct vm_area_struct *high_vma = find_vma(mm, 0xffff0000); \
if (high_vma) { \
BUG_ON(high_vma->vm_next); /* it should be last */ \
if (high_vma->vm_prev) \
high_vma->vm_prev->vm_next = NULL; \
else \
mm->mmap = NULL; \
rb_erase(&high_vma->vm_rb, &mm->mm_rb); \
mm->mmap_cache = NULL; \
mm->map_count--; \
remove_vma(high_vma); \
} \
} while (0)
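/*
 * For context (hedged sketch, not part of this patch): the generic
 * mm/mmap.c:exit_mmap() invokes arch_exit_mmap() before tearing the
 * VMAs down, which is what gives this port the chance to unlink the
 * fake vector-page vma first.
 */
static inline void exit_mmap_sketch(struct mm_struct *mm)
{
	arch_exit_mmap(mm);	/* unlink the fake vector-page vma */
	/*
	 * ... the generic teardown then walks mm->mmap via unmap_vmas(),
	 * free_pgtables() and the remove_vma() loop, and would otherwise
	 * trip over a vma it never inserted itself.
	 */
}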
static inline void arch_dup_mmap(struct mm_struct *oldmm,
struct mm_struct *mm)
{
}
#endif
/*
* linux/arch/unicore32/include/asm/pgalloc.h
*
* Code specific to PKUnity SoC and UniCore ISA
*
* Copyright (C) 2001-2010 GUAN Xue-tao
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __UNICORE_PGALLOC_H__
#define __UNICORE_PGALLOC_H__
#include <asm/pgtable-hwdef.h>
#include <asm/processor.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#define check_pgt_cache() do { } while (0)
#define _PAGE_USER_TABLE (PMD_TYPE_TABLE | PMD_PRESENT)
#define _PAGE_KERNEL_TABLE (PMD_TYPE_TABLE | PMD_PRESENT)
extern pgd_t *get_pgd_slow(struct mm_struct *mm);
extern void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd);
#define pgd_alloc(mm) get_pgd_slow(mm)
#define pgd_free(mm, pgd) free_pgd_slow(mm, pgd)
#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO)
/*
* Allocate one PTE table.
*/
static inline pte_t *
pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
{
pte_t *pte;
pte = (pte_t *)__get_free_page(PGALLOC_GFP);
if (pte)
clean_dcache_area(pte, PTRS_PER_PTE * sizeof(pte_t));
return pte;
}
static inline pgtable_t
pte_alloc_one(struct mm_struct *mm, unsigned long addr)
{
struct page *pte;
pte = alloc_pages(PGALLOC_GFP, 0);
if (pte) {
if (!PageHighMem(pte)) {
void *page = page_address(pte);
clean_dcache_area(page, PTRS_PER_PTE * sizeof(pte_t));
}
pgtable_page_ctor(pte);
}
return pte;
}
/*
* Free one PTE table.
*/
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
if (pte)
free_page((unsigned long)pte);
}
static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
{
pgtable_page_dtor(pte);
__free_page(pte);
}
static inline void __pmd_populate(pmd_t *pmdp, unsigned long pmdval)
{
set_pmd(pmdp, __pmd(pmdval));
flush_pmd_entry(pmdp);
}
/*
* Populate the pmdp entry with a pointer to the pte. This pmd is part
* of the mm address space.
*/
static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
{
unsigned long pte_ptr = (unsigned long)ptep;
/*
* The pmd must be loaded with the physical
* address of the PTE table
*/
__pmd_populate(pmdp, __pa(pte_ptr) | _PAGE_KERNEL_TABLE);
}
static inline void
pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep)
{
__pmd_populate(pmdp,
page_to_pfn(ptep) << PAGE_SHIFT | _PAGE_USER_TABLE);
}
#define pmd_pgtable(pmd) pmd_page(pmd)
#endif
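As a hedged usage sketch, this is roughly how the generic mm layer pairs the allocators above with pmd_populate(), mirroring mm/memory.c:__pte_alloc() of this era (simplified, locking reduced to the page_table_lock; the function name is illustrative, not part of the patch):

/*
 * Sketch: allocate a PTE table and install it, dropping it again if
 * another thread raced us and populated the pmd first.
 */
static int pte_alloc_sketch(struct mm_struct *mm, pmd_t *pmd,
			    unsigned long addr)
{
	pgtable_t new = pte_alloc_one(mm, addr);
	if (!new)
		return -ENOMEM;

	spin_lock(&mm->page_table_lock);
	if (likely(pmd_none(*pmd)))	/* nobody raced us */
		pmd_populate(mm, pmd, new);
	else
		pte_free(mm, new);
	spin_unlock(&mm->page_table_lock);
	return 0;
}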
/*
* linux/arch/unicore32/include/asm/pgtable-hwdef.h
*
* Code specific to PKUnity SoC and UniCore ISA
*
* Copyright (C) 2001-2010 GUAN Xue-tao
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __UNICORE_PGTABLE_HWDEF_H__
#define __UNICORE_PGTABLE_HWDEF_H__
/*
* Hardware page table definitions.
*
* + Level 1 descriptor (PMD)
* - common
*/
#define PMD_TYPE_MASK (3 << 0)
#define PMD_TYPE_TABLE (0 << 0)
/*#define PMD_TYPE_LARGE (1 << 0) */
#define PMD_TYPE_INVALID (2 << 0)
#define PMD_TYPE_SECT (3 << 0)
#define PMD_PRESENT (1 << 2)
#define PMD_YOUNG (1 << 3)
/*#define PMD_SECT_DIRTY (1 << 4) */
#define PMD_SECT_CACHEABLE (1 << 5)
#define PMD_SECT_EXEC (1 << 6)
#define PMD_SECT_WRITE (1 << 7)
#define PMD_SECT_READ (1 << 8)
/*
* + Level 2 descriptor (PTE)
* - common
*/
#define PTE_TYPE_MASK (3 << 0)
#define PTE_TYPE_SMALL (0 << 0)
#define PTE_TYPE_MIDDLE (1 << 0)
#define PTE_TYPE_LARGE (2 << 0)
#define PTE_TYPE_INVALID (3 << 0)
#define PTE_PRESENT (1 << 2)
#define PTE_FILE (1 << 3) /* only when !PRESENT */
#define PTE_YOUNG (1 << 3)
#define PTE_DIRTY (1 << 4)
#define PTE_CACHEABLE (1 << 5)
#define PTE_EXEC (1 << 6)
#define PTE_WRITE (1 << 7)
#define PTE_READ (1 << 8)
#endif
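To make the bit layout concrete, a hedged illustration of composing descriptors from the definitions above: a present, cacheable, fully accessible kernel section mapping, and a present, young, cacheable, read-only user small page. These are illustrative values, not taken from the patch:

/* Illustrative descriptor composition using the masks above. */
unsigned long pmdval_sketch = PMD_TYPE_SECT | PMD_PRESENT | PMD_YOUNG
			    | PMD_SECT_CACHEABLE | PMD_SECT_READ
			    | PMD_SECT_WRITE | PMD_SECT_EXEC;

unsigned long pteval_sketch = PTE_TYPE_SMALL | PTE_PRESENT | PTE_YOUNG
			    | PTE_CACHEABLE | PTE_READ;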
/*
* linux/arch/unicore32/mm/extable.c
*
* Code specific to PKUnity SoC and UniCore ISA
*
* Copyright (C) 2001-2010 GUAN Xue-tao
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/uaccess.h>
int fixup_exception(struct pt_regs *regs)
{
const struct exception_table_entry *fixup;
fixup = search_exception_tables(instruction_pointer(regs));
if (fixup)
regs->UCreg_pc = fixup->fixup;
return fixup != NULL;
}
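For context, a hedged sketch of the caller side: when a kernel-mode fault cannot be resolved, the fault handler consults the exception table so that guarded user accessors (copy_from_user() and friends) can branch to their fixup and return -EFAULT instead of oopsing. The sketch is modeled on other ports' do_page_fault(); the function name is illustrative.

/* Sketch of how a fault handler would use fixup_exception(). */
static void do_kernel_fault_sketch(unsigned long addr, struct pt_regs *regs)
{
	/* Branch to the fixup handler if this pc has one registered. */
	if (fixup_exception(regs))
		return;

	/* No fixup: the kernel dereferenced a bad pointer -- oops. */
	/* die("Oops", regs, ...); */
}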
/*
* linux/arch/unicore32/mm/pgd.c
*
* Code specific to PKUnity SoC and UniCore ISA
*
* Copyright (C) 2001-2010 GUAN Xue-tao
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/tlbflush.h>
#include "mm.h"
#define FIRST_KERNEL_PGD_NR (FIRST_USER_PGD_NR + USER_PTRS_PER_PGD)
/*
* We need a 4K page for the level-1 (PGD) table.
*/
pgd_t *get_pgd_slow(struct mm_struct *mm)
{
pgd_t *new_pgd, *init_pgd;
pmd_t *new_pmd, *init_pmd;
pte_t *new_pte, *init_pte;
new_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, 0);
if (!new_pgd)
goto no_pgd;
memset(new_pgd, 0, FIRST_KERNEL_PGD_NR * sizeof(pgd_t));
/*
* Copy over the kernel and IO PGD entries
*/
init_pgd = pgd_offset_k(0);
memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR,
(PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t));
clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));
if (!vectors_high()) {
/*
* On UniCore, the first page must always be allocated since
* it contains the machine vectors.
*/
new_pmd = pmd_alloc(mm, (pud_t *)new_pgd, 0);
if (!new_pmd)
goto no_pmd;
new_pte = pte_alloc_map(mm, new_pmd, 0);
if (!new_pte)
goto no_pte;
init_pmd = pmd_offset((pud_t *)init_pgd, 0);
init_pte = pte_offset_map(init_pmd, 0);
set_pte(new_pte, *init_pte);
pte_unmap(init_pte);
pte_unmap(new_pte);
}
return new_pgd;
no_pte:
pmd_free(mm, new_pmd);
no_pmd:
free_pages((unsigned long)new_pgd, 0);
no_pgd:
return NULL;
}
void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd)
{
pmd_t *pmd;
pgtable_t pte;
if (!pgd)
return;
/* pgd is always present and good */
pmd = pmd_off(pgd, 0);
if (pmd_none(*pmd))
goto free;
if (pmd_bad(*pmd)) {
pmd_ERROR(*pmd);
pmd_clear(pmd);
goto free;
}
pte = pmd_pgtable(*pmd);
pmd_clear(pmd);
pte_free(mm, pte);
pmd_free(mm, pmd);
free:
free_pages((unsigned long) pgd, 0);
}
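For reference, a hedged sketch of how the generic layer reaches these two functions through the pgd_alloc()/pgd_free() macros from pgalloc.h, roughly as kernel/fork.c did in this era (simplified; the _sketch names are illustrative):

/* Sketch of the generic callers (kernel/fork.c, simplified). */
static int mm_alloc_pgd_sketch(struct mm_struct *mm)
{
	mm->pgd = pgd_alloc(mm);	/* -> get_pgd_slow(mm) */
	return mm->pgd ? 0 : -ENOMEM;
}

static void mm_free_pgd_sketch(struct mm_struct *mm)
{
	pgd_free(mm, mm->pgd);		/* -> free_pgd_slow(mm, mm->pgd) */
}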