Commit 11a6f6ab authored by Aneesh Kumar K.V, committed by Michael Ellerman

powerpc/mm: Move radix/hash common data structures to book3s64 headers

Start moving code that is generic between radix and hash to the book3s64-specific headers from the book3s64 hash-specific one.
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent 33d336d9
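
For context on the structures being moved, here is a minimal standalone sketch (plain userspace C, not kernel code; the page-size indices and shift values are made up for illustration) of how the mmu_psize_defs[] table and the shift_to_mmu_psize() helper that appear in the diff below are typically used to map a page-size shift back to its MMU page-size index:

/*
 * Minimal standalone sketch (userspace C, not kernel code): the page size
 * indices and shift values below are illustrative only.
 */
#include <stdio.h>

#define MMU_PAGE_4K	0
#define MMU_PAGE_64K	1
#define MMU_PAGE_16M	2
#define MMU_PAGE_COUNT	3

struct mmu_psize_def {
	unsigned int shift;		/* "PAGE_SHIFT" for this page size */
	int penc[MMU_PAGE_COUNT];	/* HPTE "LP" encodings */
	unsigned int tlbiel;		/* tlbiel usable for this page size */
	unsigned long avpnm;		/* bits to mask out of the AVPN */
	unsigned long sllp;		/* SLB L||LP bits for slbmte */
};

static struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
	[MMU_PAGE_4K]  = { .shift = 12 },
	[MMU_PAGE_64K] = { .shift = 16 },
	[MMU_PAGE_16M] = { .shift = 24 },
};

/* Reverse-map a shift to its page size index, as shift_to_mmu_psize() does. */
static int shift_to_mmu_psize(unsigned int shift)
{
	int psize;

	for (psize = 0; psize < MMU_PAGE_COUNT; psize++)
		if (mmu_psize_defs[psize].shift == shift)
			return psize;
	return -1;
}

int main(void)
{
	printf("shift 16 -> psize index %d\n", shift_to_mmu_psize(16));
	return 0;
}
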
-#ifndef _ASM_POWERPC_MMU_HASH32_H_
-#define _ASM_POWERPC_MMU_HASH32_H_
+#ifndef _ASM_POWERPC_BOOK3S_32_MMU_HASH_H_
+#define _ASM_POWERPC_BOOK3S_32_MMU_HASH_H_
/*
* 32-bit hash table MMU support
*/
@@ -90,4 +90,4 @@ typedef struct {
#define mmu_virtual_psize MMU_PAGE_4K
#define mmu_linear_psize MMU_PAGE_256M
-#endif /* _ASM_POWERPC_MMU_HASH32_H_ */
+#endif /* _ASM_POWERPC_BOOK3S_32_MMU_HASH_H_ */
-#ifndef _ASM_POWERPC_MMU_HASH64_H_
-#define _ASM_POWERPC_MMU_HASH64_H_
+#ifndef _ASM_POWERPC_BOOK3S_64_MMU_HASH_H_
+#define _ASM_POWERPC_BOOK3S_64_MMU_HASH_H_
/*
* PowerPC64 memory management structures
*
@@ -127,24 +127,6 @@ extern struct hash_pte *htab_address;
extern unsigned long htab_size_bytes;
extern unsigned long htab_hash_mask;
-/*
- * Page size definition
- *
- * shift : is the "PAGE_SHIFT" value for that page size
- * sllp  : is a bit mask with the value of SLB L || LP to be or'ed
- *         directly to a slbmte "vsid" value
- * penc  : is the HPTE encoding mask for the "LP" field:
- *
- */
-struct mmu_psize_def
-{
-	unsigned int	shift;	/* number of bits */
-	int		penc[MMU_PAGE_COUNT];	/* HPTE encoding */
-	unsigned int	tlbiel;	/* tlbiel supported for that page size */
-	unsigned long	avpnm;	/* bits to mask out in AVPN in the HPTE */
-	unsigned long	sllp;	/* SLB L||LP (exact mask to use in slbmte) */
-};
-extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
static inline int shift_to_mmu_psize(unsigned int shift)
{
@@ -210,11 +192,6 @@ static inline int segment_shift(int ssize)
/*
* The current system page and segment sizes
*/
-extern int mmu_linear_psize;
-extern int mmu_virtual_psize;
-extern int mmu_vmalloc_psize;
-extern int mmu_vmemmap_psize;
-extern int mmu_io_psize;
extern int mmu_kernel_ssize;
extern int mmu_highuser_ssize;
extern u16 mmu_slb_size;
@@ -512,38 +489,6 @@ static inline void subpage_prot_free(struct mm_struct *mm) {}
static inline void subpage_prot_init_new_context(struct mm_struct *mm) { }
#endif /* CONFIG_PPC_SUBPAGE_PROT */
-typedef unsigned long mm_context_id_t;
-struct spinlock;
-typedef struct {
-	mm_context_id_t id;
-	u16 user_psize;		/* page size index */
-#ifdef CONFIG_PPC_MM_SLICES
-	u64 low_slices_psize;	/* SLB page size encodings */
-	unsigned char high_slices_psize[SLICE_ARRAY_SIZE];
-#else
-	u16 sllp;		/* SLB page size encoding */
-#endif
-	unsigned long vdso_base;
-#ifdef CONFIG_PPC_SUBPAGE_PROT
-	struct subpage_prot_table spt;
-#endif /* CONFIG_PPC_SUBPAGE_PROT */
-#ifdef CONFIG_PPC_ICSWX
-	struct spinlock *cop_lockp;	/* guard acop and cop_pid */
-	unsigned long acop;	/* mask of enabled coprocessor types */
-	unsigned int cop_pid;	/* pid value used with coprocessors */
-#endif /* CONFIG_PPC_ICSWX */
-#ifdef CONFIG_PPC_64K_PAGES
-	/* for 4K PTE fragment support */
-	void *pte_frag;
-#endif
-#ifdef CONFIG_SPAPR_TCE_IOMMU
-	struct list_head iommu_group_mem_list;
-#endif
-} mm_context_t;
#if 0
/*
* The code below is equivalent to this function for arguments
@@ -613,4 +558,4 @@ unsigned htab_shift_for_mem_size(unsigned long mem_size);
#endif /* __ASSEMBLY__ */
-#endif /* _ASM_POWERPC_MMU_HASH64_H_ */
+#endif /* _ASM_POWERPC_BOOK3S_64_MMU_HASH_H_ */
+#ifndef _ASM_POWERPC_BOOK3S_64_MMU_H_
+#define _ASM_POWERPC_BOOK3S_64_MMU_H_
+#ifndef __ASSEMBLY__
+/*
+ * Page size definition
+ *
+ * shift : is the "PAGE_SHIFT" value for that page size
+ * sllp  : is a bit mask with the value of SLB L || LP to be or'ed
+ *         directly to a slbmte "vsid" value
+ * penc  : is the HPTE encoding mask for the "LP" field:
+ *
+ */
+struct mmu_psize_def {
+	unsigned int	shift;	/* number of bits */
+	int		penc[MMU_PAGE_COUNT];	/* HPTE encoding */
+	unsigned int	tlbiel;	/* tlbiel supported for that page size */
+	unsigned long	avpnm;	/* bits to mask out in AVPN in the HPTE */
+	unsigned long	sllp;	/* SLB L||LP (exact mask to use in slbmte) */
+};
+extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
+#endif /* __ASSEMBLY__ */
+#ifdef CONFIG_PPC_STD_MMU_64
+/* 64-bit classic hash table MMU */
+#include <asm/book3s/64/mmu-hash.h>
+#endif
+#ifndef __ASSEMBLY__
+typedef unsigned long mm_context_id_t;
+struct spinlock;
+typedef struct {
+	mm_context_id_t id;
+	u16 user_psize;		/* page size index */
+#ifdef CONFIG_PPC_MM_SLICES
+	u64 low_slices_psize;	/* SLB page size encodings */
+	unsigned char high_slices_psize[SLICE_ARRAY_SIZE];
+#else
+	u16 sllp;		/* SLB page size encoding */
+#endif
+	unsigned long vdso_base;
+#ifdef CONFIG_PPC_SUBPAGE_PROT
+	struct subpage_prot_table spt;
+#endif /* CONFIG_PPC_SUBPAGE_PROT */
+#ifdef CONFIG_PPC_ICSWX
+	struct spinlock *cop_lockp;	/* guard acop and cop_pid */
+	unsigned long acop;	/* mask of enabled coprocessor types */
+	unsigned int cop_pid;	/* pid value used with coprocessors */
+#endif /* CONFIG_PPC_ICSWX */
+#ifdef CONFIG_PPC_64K_PAGES
+	/* for 4K PTE fragment support */
+	void *pte_frag;
+#endif
+#ifdef CONFIG_SPAPR_TCE_IOMMU
+	struct list_head iommu_group_mem_list;
+#endif
+} mm_context_t;
+/*
+ * The current system page and segment sizes
+ */
+extern int mmu_linear_psize;
+extern int mmu_virtual_psize;
+extern int mmu_vmalloc_psize;
+extern int mmu_vmemmap_psize;
+extern int mmu_io_psize;
+#endif /* __ASSEMBLY__ */
+#endif /* _ASM_POWERPC_BOOK3S_64_MMU_H_ */
@@ -181,10 +181,13 @@ static inline void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
#define MMU_PAGE_COUNT 15
-#if defined(CONFIG_PPC_STD_MMU_64)
-/* 64-bit classic hash table MMU */
-#include <asm/book3s/64/mmu-hash.h>
-#elif defined(CONFIG_PPC_STD_MMU_32)
+#ifdef CONFIG_PPC_BOOK3S_64
+#include <asm/book3s/64/mmu.h>
+#else /* CONFIG_PPC_BOOK3S_64 */
+#endif
+#if defined(CONFIG_PPC_STD_MMU_32)
/* 32-bit classic hash table MMU */
#include <asm/book3s/32/mmu-hash.h>
#elif defined(CONFIG_40x)