Commit aabded9c authored by Linus Torvalds

Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc

* 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc:
  [POWERPC] Further fixes for the removal of 4level-fixup hack from ppc32
  [POWERPC] EEH: log all PCI-X and PCI-E AER registers
  [POWERPC] EEH: capture and log pci state on error
  [POWERPC] EEH: Split up long error msg
  [POWERPC] EEH: log error only after driver notification.
  [POWERPC] fsl_soc: Make mac_addr const in fs_enet_of_init().
  [POWERPC] Don't use SLAB/SLUB for PTE pages
  [POWERPC] Spufs support for 64K LS mappings on 4K kernels
  [POWERPC] Add ability to 4K kernel to hash in 64K pages
  [POWERPC] Introduce address space "slices"
  [POWERPC] Small fixes & cleanups in segment page size demotion
  [POWERPC] iSeries: Make HVC_ISERIES the default
  [POWERPC] iSeries: suppress build warning in lparmap.c
  [POWERPC] Mark pages that don't exist as nosave
  [POWERPC] swsusp: Introduce register_nosave_region_late
parents 9a9136e2 f1a1eb29
...@@ -120,19 +120,6 @@ config GENERIC_BUG ...@@ -120,19 +120,6 @@ config GENERIC_BUG
config SYS_SUPPORTS_APM_EMULATION config SYS_SUPPORTS_APM_EMULATION
bool bool
#
# Powerpc uses the slab allocator to manage its ptes and the
# page structs of ptes are used for splitting the page table
# lock for configurations supporting more than SPLIT_PTLOCK_CPUS.
#
# In that special configuration the page structs of slabs are modified.
# This setting disables the selection of SLUB as a slab allocator.
#
config ARCH_USES_SLAB_PAGE_STRUCT
bool
default y
depends on SPLIT_PTLOCK_CPUS <= NR_CPUS
config DEFAULT_UIMAGE config DEFAULT_UIMAGE
bool bool
help help
...@@ -352,6 +339,11 @@ config PPC_STD_MMU_32 ...@@ -352,6 +339,11 @@ config PPC_STD_MMU_32
def_bool y def_bool y
depends on PPC_STD_MMU && PPC32 depends on PPC_STD_MMU && PPC32
config PPC_MM_SLICES
bool
default y if HUGETLB_PAGE
default n
config VIRT_CPU_ACCOUNTING config VIRT_CPU_ACCOUNTING
bool "Deterministic task and CPU time accounting" bool "Deterministic task and CPU time accounting"
depends on PPC64 depends on PPC64
...@@ -541,9 +533,15 @@ config NODES_SPAN_OTHER_NODES ...@@ -541,9 +533,15 @@ config NODES_SPAN_OTHER_NODES
def_bool y def_bool y
depends on NEED_MULTIPLE_NODES depends on NEED_MULTIPLE_NODES
config PPC_HAS_HASH_64K
bool
depends on PPC64
default n
config PPC_64K_PAGES config PPC_64K_PAGES
bool "64k page size" bool "64k page size"
depends on PPC64 depends on PPC64
select PPC_HAS_HASH_64K
help help
This option changes the kernel logical page size to 64k. On machines This option changes the kernel logical page size to 64k. On machines
without processor support for 64k pages, the kernel will simulate without processor support for 64k pages, the kernel will simulate
......
...@@ -122,12 +122,18 @@ int main(void) ...@@ -122,12 +122,18 @@ int main(void)
DEFINE(PACASLBCACHE, offsetof(struct paca_struct, slb_cache)); DEFINE(PACASLBCACHE, offsetof(struct paca_struct, slb_cache));
DEFINE(PACASLBCACHEPTR, offsetof(struct paca_struct, slb_cache_ptr)); DEFINE(PACASLBCACHEPTR, offsetof(struct paca_struct, slb_cache_ptr));
DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id)); DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id));
DEFINE(PACACONTEXTSLLP, offsetof(struct paca_struct, context.sllp));
DEFINE(PACAVMALLOCSLLP, offsetof(struct paca_struct, vmalloc_sllp)); DEFINE(PACAVMALLOCSLLP, offsetof(struct paca_struct, vmalloc_sllp));
#ifdef CONFIG_HUGETLB_PAGE #ifdef CONFIG_PPC_MM_SLICES
DEFINE(PACALOWHTLBAREAS, offsetof(struct paca_struct, context.low_htlb_areas)); DEFINE(PACALOWSLICESPSIZE, offsetof(struct paca_struct,
DEFINE(PACAHIGHHTLBAREAS, offsetof(struct paca_struct, context.high_htlb_areas)); context.low_slices_psize));
#endif /* CONFIG_HUGETLB_PAGE */ DEFINE(PACAHIGHSLICEPSIZE, offsetof(struct paca_struct,
context.high_slices_psize));
DEFINE(MMUPSIZEDEFSIZE, sizeof(struct mmu_psize_def));
DEFINE(MMUPSIZESLLP, offsetof(struct mmu_psize_def, sllp));
#else
DEFINE(PACACONTEXTSLLP, offsetof(struct paca_struct, context.sllp));
#endif /* CONFIG_PPC_MM_SLICES */
DEFINE(PACA_EXGEN, offsetof(struct paca_struct, exgen)); DEFINE(PACA_EXGEN, offsetof(struct paca_struct, exgen));
DEFINE(PACA_EXMC, offsetof(struct paca_struct, exmc)); DEFINE(PACA_EXMC, offsetof(struct paca_struct, exmc));
DEFINE(PACA_EXSLB, offsetof(struct paca_struct, exslb)); DEFINE(PACA_EXSLB, offsetof(struct paca_struct, exslb));
......
...@@ -10,7 +10,8 @@ ...@@ -10,7 +10,8 @@
#include <asm/pgtable.h> #include <asm/pgtable.h>
#include <asm/iseries/lpar_map.h> #include <asm/iseries/lpar_map.h>
const struct LparMap __attribute__((__section__(".text"))) xLparMap = { /* The # is to stop gcc trying to make .text nonexecutable */
const struct LparMap __attribute__((__section__(".text #"))) xLparMap = {
.xNumberEsids = HvEsidsToMap, .xNumberEsids = HvEsidsToMap,
.xNumberRanges = HvRangesToMap, .xNumberRanges = HvRangesToMap,
.xSegmentTableOffs = STAB0_PAGE, .xSegmentTableOffs = STAB0_PAGE,
......
...@@ -18,4 +18,5 @@ obj-$(CONFIG_40x) += 4xx_mmu.o ...@@ -18,4 +18,5 @@ obj-$(CONFIG_40x) += 4xx_mmu.o
obj-$(CONFIG_44x) += 44x_mmu.o obj-$(CONFIG_44x) += 44x_mmu.o
obj-$(CONFIG_FSL_BOOKE) += fsl_booke_mmu.o obj-$(CONFIG_FSL_BOOKE) += fsl_booke_mmu.o
obj-$(CONFIG_NEED_MULTIPLE_NODES) += numa.o obj-$(CONFIG_NEED_MULTIPLE_NODES) += numa.o
obj-$(CONFIG_PPC_MM_SLICES) += slice.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
...@@ -615,6 +615,9 @@ htab_pte_insert_failure: ...@@ -615,6 +615,9 @@ htab_pte_insert_failure:
li r3,-1 li r3,-1
b htab_bail b htab_bail
#endif /* CONFIG_PPC_64K_PAGES */
#ifdef CONFIG_PPC_HAS_HASH_64K
/***************************************************************************** /*****************************************************************************
* * * *
...@@ -870,7 +873,7 @@ ht64_pte_insert_failure: ...@@ -870,7 +873,7 @@ ht64_pte_insert_failure:
b ht64_bail b ht64_bail
#endif /* CONFIG_PPC_64K_PAGES */ #endif /* CONFIG_PPC_HAS_HASH_64K */
/***************************************************************************** /*****************************************************************************
......
...@@ -51,6 +51,7 @@ ...@@ -51,6 +51,7 @@
#include <asm/cputable.h> #include <asm/cputable.h>
#include <asm/abs_addr.h> #include <asm/abs_addr.h>
#include <asm/sections.h> #include <asm/sections.h>
#include <asm/spu.h>
#ifdef DEBUG #ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt) #define DBG(fmt...) udbg_printf(fmt)
...@@ -419,7 +420,7 @@ static void __init htab_finish_init(void) ...@@ -419,7 +420,7 @@ static void __init htab_finish_init(void)
extern unsigned int *htab_call_hpte_remove; extern unsigned int *htab_call_hpte_remove;
extern unsigned int *htab_call_hpte_updatepp; extern unsigned int *htab_call_hpte_updatepp;
#ifdef CONFIG_PPC_64K_PAGES #ifdef CONFIG_PPC_HAS_HASH_64K
extern unsigned int *ht64_call_hpte_insert1; extern unsigned int *ht64_call_hpte_insert1;
extern unsigned int *ht64_call_hpte_insert2; extern unsigned int *ht64_call_hpte_insert2;
extern unsigned int *ht64_call_hpte_remove; extern unsigned int *ht64_call_hpte_remove;
...@@ -596,22 +597,23 @@ unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap) ...@@ -596,22 +597,23 @@ unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap)
* Demote a segment to using 4k pages. * Demote a segment to using 4k pages.
* For now this makes the whole process use 4k pages. * For now this makes the whole process use 4k pages.
*/ */
void demote_segment_4k(struct mm_struct *mm, unsigned long addr)
{
#ifdef CONFIG_PPC_64K_PAGES #ifdef CONFIG_PPC_64K_PAGES
static void demote_segment_4k(struct mm_struct *mm, unsigned long addr)
{
if (mm->context.user_psize == MMU_PAGE_4K) if (mm->context.user_psize == MMU_PAGE_4K)
return; return;
#ifdef CONFIG_PPC_MM_SLICES
slice_set_user_psize(mm, MMU_PAGE_4K);
#else /* CONFIG_PPC_MM_SLICES */
mm->context.user_psize = MMU_PAGE_4K; mm->context.user_psize = MMU_PAGE_4K;
mm->context.sllp = SLB_VSID_USER | mmu_psize_defs[MMU_PAGE_4K].sllp; mm->context.sllp = SLB_VSID_USER | mmu_psize_defs[MMU_PAGE_4K].sllp;
get_paca()->context = mm->context; #endif /* CONFIG_PPC_MM_SLICES */
slb_flush_and_rebolt();
#ifdef CONFIG_SPE_BASE #ifdef CONFIG_SPE_BASE
spu_flush_all_slbs(mm); spu_flush_all_slbs(mm);
#endif #endif
#endif
} }
#endif /* CONFIG_PPC_64K_PAGES */
EXPORT_SYMBOL_GPL(demote_segment_4k);
/* Result code is: /* Result code is:
* 0 - handled * 0 - handled
...@@ -646,7 +648,11 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap) ...@@ -646,7 +648,11 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
return 1; return 1;
} }
vsid = get_vsid(mm->context.id, ea); vsid = get_vsid(mm->context.id, ea);
#ifdef CONFIG_PPC_MM_SLICES
psize = get_slice_psize(mm, ea);
#else
psize = mm->context.user_psize; psize = mm->context.user_psize;
#endif
break; break;
case VMALLOC_REGION_ID: case VMALLOC_REGION_ID:
mm = &init_mm; mm = &init_mm;
...@@ -674,11 +680,22 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap) ...@@ -674,11 +680,22 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
if (user_region && cpus_equal(mm->cpu_vm_mask, tmp)) if (user_region && cpus_equal(mm->cpu_vm_mask, tmp))
local = 1; local = 1;
#ifdef CONFIG_HUGETLB_PAGE
/* Handle hugepage regions */ /* Handle hugepage regions */
if (unlikely(in_hugepage_area(mm->context, ea))) { if (HPAGE_SHIFT && psize == mmu_huge_psize) {
DBG_LOW(" -> huge page !\n"); DBG_LOW(" -> huge page !\n");
return hash_huge_page(mm, access, ea, vsid, local, trap); return hash_huge_page(mm, access, ea, vsid, local, trap);
} }
#endif /* CONFIG_HUGETLB_PAGE */
#ifndef CONFIG_PPC_64K_PAGES
/* If we use 4K pages and our psize is not 4K, then we are hitting
* a special driver mapping, we need to align the address before
* we fetch the PTE
*/
if (psize != MMU_PAGE_4K)
ea &= ~((1ul << mmu_psize_defs[psize].shift) - 1);
#endif /* CONFIG_PPC_64K_PAGES */
/* Get PTE and page size from page tables */ /* Get PTE and page size from page tables */
ptep = find_linux_pte(pgdir, ea); ptep = find_linux_pte(pgdir, ea);
...@@ -702,18 +719,17 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap) ...@@ -702,18 +719,17 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
} }
/* Do actual hashing */ /* Do actual hashing */
#ifndef CONFIG_PPC_64K_PAGES #ifdef CONFIG_PPC_64K_PAGES
rc = __hash_page_4K(ea, access, vsid, ptep, trap, local);
#else
/* If _PAGE_4K_PFN is set, make sure this is a 4k segment */ /* If _PAGE_4K_PFN is set, make sure this is a 4k segment */
if (pte_val(*ptep) & _PAGE_4K_PFN) { if (pte_val(*ptep) & _PAGE_4K_PFN) {
demote_segment_4k(mm, ea); demote_segment_4k(mm, ea);
psize = MMU_PAGE_4K; psize = MMU_PAGE_4K;
} }
if (mmu_ci_restrictions) { /* If this PTE is non-cacheable and we have restrictions on
/* If this PTE is non-cacheable, switch to 4k */ * using non cacheable large pages, then we switch to 4k
if (psize == MMU_PAGE_64K && */
if (mmu_ci_restrictions && psize == MMU_PAGE_64K &&
(pte_val(*ptep) & _PAGE_NO_CACHE)) { (pte_val(*ptep) & _PAGE_NO_CACHE)) {
if (user_region) { if (user_region) {
demote_segment_4k(mm, ea); demote_segment_4k(mm, ea);
...@@ -728,14 +744,15 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap) ...@@ -728,14 +744,15 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
"to 4kB pages because of " "to 4kB pages because of "
"non-cacheable mapping\n"); "non-cacheable mapping\n");
psize = mmu_vmalloc_psize = MMU_PAGE_4K; psize = mmu_vmalloc_psize = MMU_PAGE_4K;
}
#ifdef CONFIG_SPE_BASE #ifdef CONFIG_SPE_BASE
spu_flush_all_slbs(mm); spu_flush_all_slbs(mm);
#endif #endif
} }
}
if (user_region) { if (user_region) {
if (psize != get_paca()->context.user_psize) { if (psize != get_paca()->context.user_psize) {
get_paca()->context = mm->context; get_paca()->context.user_psize =
mm->context.user_psize;
slb_flush_and_rebolt(); slb_flush_and_rebolt();
} }
} else if (get_paca()->vmalloc_sllp != } else if (get_paca()->vmalloc_sllp !=
...@@ -744,12 +761,14 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap) ...@@ -744,12 +761,14 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
mmu_psize_defs[mmu_vmalloc_psize].sllp; mmu_psize_defs[mmu_vmalloc_psize].sllp;
slb_flush_and_rebolt(); slb_flush_and_rebolt();
} }
} #endif /* CONFIG_PPC_64K_PAGES */
#ifdef CONFIG_PPC_HAS_HASH_64K
if (psize == MMU_PAGE_64K) if (psize == MMU_PAGE_64K)
rc = __hash_page_64K(ea, access, vsid, ptep, trap, local); rc = __hash_page_64K(ea, access, vsid, ptep, trap, local);
else else
#endif /* CONFIG_PPC_HAS_HASH_64K */
rc = __hash_page_4K(ea, access, vsid, ptep, trap, local); rc = __hash_page_4K(ea, access, vsid, ptep, trap, local);
#endif /* CONFIG_PPC_64K_PAGES */
#ifndef CONFIG_PPC_64K_PAGES #ifndef CONFIG_PPC_64K_PAGES
DBG_LOW(" o-pte: %016lx\n", pte_val(*ptep)); DBG_LOW(" o-pte: %016lx\n", pte_val(*ptep));
...@@ -772,42 +791,55 @@ void hash_preload(struct mm_struct *mm, unsigned long ea, ...@@ -772,42 +791,55 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
unsigned long flags; unsigned long flags;
int local = 0; int local = 0;
/* We don't want huge pages prefaulted for now BUG_ON(REGION_ID(ea) != USER_REGION_ID);
*/
if (unlikely(in_hugepage_area(mm->context, ea))) #ifdef CONFIG_PPC_MM_SLICES
/* We only prefault standard pages for now */
if (unlikely(get_slice_psize(mm, ea) != mm->context.user_psize))
return; return;
#endif
DBG_LOW("hash_preload(mm=%p, mm->pgdir=%p, ea=%016lx, access=%lx," DBG_LOW("hash_preload(mm=%p, mm->pgdir=%p, ea=%016lx, access=%lx,"
" trap=%lx\n", mm, mm->pgd, ea, access, trap); " trap=%lx\n", mm, mm->pgd, ea, access, trap);
/* Get PTE, VSID, access mask */ /* Get Linux PTE if available */
pgdir = mm->pgd; pgdir = mm->pgd;
if (pgdir == NULL) if (pgdir == NULL)
return; return;
ptep = find_linux_pte(pgdir, ea); ptep = find_linux_pte(pgdir, ea);
if (!ptep) if (!ptep)
return; return;
#ifdef CONFIG_PPC_64K_PAGES
/* If either _PAGE_4K_PFN or _PAGE_NO_CACHE is set (and we are on
* a 64K kernel), then we don't preload, hash_page() will take
* care of it once we actually try to access the page.
* That way we don't have to duplicate all of the logic for segment
* page size demotion here
*/
if (pte_val(*ptep) & (_PAGE_4K_PFN | _PAGE_NO_CACHE))
return;
#endif /* CONFIG_PPC_64K_PAGES */
/* Get VSID */
vsid = get_vsid(mm->context.id, ea); vsid = get_vsid(mm->context.id, ea);
/* Hash it in */ /* Hash doesn't like irqs */
local_irq_save(flags); local_irq_save(flags);
/* Is that local to this CPU ? */
mask = cpumask_of_cpu(smp_processor_id()); mask = cpumask_of_cpu(smp_processor_id());
if (cpus_equal(mm->cpu_vm_mask, mask)) if (cpus_equal(mm->cpu_vm_mask, mask))
local = 1; local = 1;
#ifndef CONFIG_PPC_64K_PAGES
__hash_page_4K(ea, access, vsid, ptep, trap, local); /* Hash it in */
#else #ifdef CONFIG_PPC_HAS_HASH_64K
if (mmu_ci_restrictions) {
/* If this PTE is non-cacheable, switch to 4k */
if (mm->context.user_psize == MMU_PAGE_64K &&
(pte_val(*ptep) & _PAGE_NO_CACHE))
demote_segment_4k(mm, ea);
}
if (mm->context.user_psize == MMU_PAGE_64K) if (mm->context.user_psize == MMU_PAGE_64K)
__hash_page_64K(ea, access, vsid, ptep, trap, local); __hash_page_64K(ea, access, vsid, ptep, trap, local);
else else
__hash_page_4K(ea, access, vsid, ptep, trap, local);
#endif /* CONFIG_PPC_64K_PAGES */ #endif /* CONFIG_PPC_64K_PAGES */
__hash_page_4K(ea, access, vsid, ptep, trap, local);
local_irq_restore(flags); local_irq_restore(flags);
} }
......
(diff collapsed in the original web view; file contents not shown)
...@@ -146,21 +146,16 @@ static void zero_ctor(void *addr, struct kmem_cache *cache, unsigned long flags) ...@@ -146,21 +146,16 @@ static void zero_ctor(void *addr, struct kmem_cache *cache, unsigned long flags)
memset(addr, 0, kmem_cache_size(cache)); memset(addr, 0, kmem_cache_size(cache));
} }
#ifdef CONFIG_PPC_64K_PAGES
static const unsigned int pgtable_cache_size[3] = {
PTE_TABLE_SIZE, PMD_TABLE_SIZE, PGD_TABLE_SIZE
};
static const char *pgtable_cache_name[ARRAY_SIZE(pgtable_cache_size)] = {
"pte_pmd_cache", "pmd_cache", "pgd_cache",
};
#else
static const unsigned int pgtable_cache_size[2] = { static const unsigned int pgtable_cache_size[2] = {
PTE_TABLE_SIZE, PMD_TABLE_SIZE PGD_TABLE_SIZE, PMD_TABLE_SIZE
}; };
static const char *pgtable_cache_name[ARRAY_SIZE(pgtable_cache_size)] = { static const char *pgtable_cache_name[ARRAY_SIZE(pgtable_cache_size)] = {
"pgd_pte_cache", "pud_pmd_cache", #ifdef CONFIG_PPC_64K_PAGES
}; "pgd_cache", "pmd_cache",
#else
"pgd_cache", "pud_pmd_cache",
#endif /* CONFIG_PPC_64K_PAGES */ #endif /* CONFIG_PPC_64K_PAGES */
};
#ifdef CONFIG_HUGETLB_PAGE #ifdef CONFIG_HUGETLB_PAGE
/* Hugepages need one extra cache, initialized in hugetlbpage.c. We /* Hugepages need one extra cache, initialized in hugetlbpage.c. We
......
...@@ -31,6 +31,7 @@ ...@@ -31,6 +31,7 @@
#include <linux/highmem.h> #include <linux/highmem.h>
#include <linux/initrd.h> #include <linux/initrd.h>
#include <linux/pagemap.h> #include <linux/pagemap.h>
#include <linux/suspend.h>
#include <asm/pgalloc.h> #include <asm/pgalloc.h>
#include <asm/prom.h> #include <asm/prom.h>
...@@ -276,6 +277,28 @@ void __init do_init_bootmem(void) ...@@ -276,6 +277,28 @@ void __init do_init_bootmem(void)
init_bootmem_done = 1; init_bootmem_done = 1;
} }
/* mark pages that don't exist as nosave */
static int __init mark_nonram_nosave(void)
{
unsigned long lmb_next_region_start_pfn,
lmb_region_max_pfn;
int i;
for (i = 0; i < lmb.memory.cnt - 1; i++) {
lmb_region_max_pfn =
(lmb.memory.region[i].base >> PAGE_SHIFT) +
(lmb.memory.region[i].size >> PAGE_SHIFT);
lmb_next_region_start_pfn =
lmb.memory.region[i+1].base >> PAGE_SHIFT;
if (lmb_region_max_pfn < lmb_next_region_start_pfn)
register_nosave_region(lmb_region_max_pfn,
lmb_next_region_start_pfn);
}
return 0;
}
/* /*
* paging_init() sets up the page tables - in fact we've already done this. * paging_init() sets up the page tables - in fact we've already done this.
*/ */
...@@ -307,6 +330,8 @@ void __init paging_init(void) ...@@ -307,6 +330,8 @@ void __init paging_init(void)
max_zone_pfns[ZONE_DMA] = top_of_ram >> PAGE_SHIFT; max_zone_pfns[ZONE_DMA] = top_of_ram >> PAGE_SHIFT;
#endif #endif
free_area_init_nodes(max_zone_pfns); free_area_init_nodes(max_zone_pfns);
mark_nonram_nosave();
} }
#endif /* ! CONFIG_NEED_MULTIPLE_NODES */ #endif /* ! CONFIG_NEED_MULTIPLE_NODES */
......
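To make the hole detection in mark_nonram_nosave() above concrete, here is a worked example with an invented two-region LMB layout (the addresses are hypothetical and PAGE_SHIFT is assumed to be 12):

	/*
	 * region[0]: base 0x00000000, size 0x40000000  ->  ends at PFN 0x40000
	 * region[1]: base 0x80000000, size 0x40000000  ->  starts at PFN 0x80000
	 *
	 * The 1GB gap between the two regions is not backed by RAM, so the
	 * loop calls
	 *	register_nosave_region(0x40000, 0x80000);
	 * and swsusp will neither save nor restore those page frames.
	 */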
...@@ -28,6 +28,7 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm) ...@@ -28,6 +28,7 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{ {
int index; int index;
int err; int err;
int new_context = (mm->context.id == 0);
again: again:
if (!idr_pre_get(&mmu_context_idr, GFP_KERNEL)) if (!idr_pre_get(&mmu_context_idr, GFP_KERNEL))
...@@ -50,9 +51,18 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm) ...@@ -50,9 +51,18 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
} }
mm->context.id = index; mm->context.id = index;
#ifdef CONFIG_PPC_MM_SLICES
/* The old code would re-promote on fork; we don't do that
* when using slices as it could cause problems promoting slices
* that have been forced down to 4K
*/
if (new_context)
slice_set_user_psize(mm, mmu_virtual_psize);
#else
mm->context.user_psize = mmu_virtual_psize; mm->context.user_psize = mmu_virtual_psize;
mm->context.sllp = SLB_VSID_USER | mm->context.sllp = SLB_VSID_USER |
mmu_psize_defs[mmu_virtual_psize].sllp; mmu_psize_defs[mmu_virtual_psize].sllp;
#endif
return 0; return 0;
} }
......
...@@ -185,7 +185,7 @@ void hash_preload(struct mm_struct *mm, unsigned long ea, ...@@ -185,7 +185,7 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
if (Hash == 0) if (Hash == 0)
return; return;
pmd = pmd_offset(pgd_offset(mm, ea), ea); pmd = pmd_offset(pud_offset(pgd_offset(mm, ea), ea), ea);
if (!pmd_none(*pmd)) if (!pmd_none(*pmd))
add_hash_page(mm->context.id, ea, pmd_val(*pmd)); add_hash_page(mm->context.id, ea, pmd_val(*pmd));
} }
......
...@@ -198,12 +198,6 @@ void slb_initialize(void) ...@@ -198,12 +198,6 @@ void slb_initialize(void)
static int slb_encoding_inited; static int slb_encoding_inited;
extern unsigned int *slb_miss_kernel_load_linear; extern unsigned int *slb_miss_kernel_load_linear;
extern unsigned int *slb_miss_kernel_load_io; extern unsigned int *slb_miss_kernel_load_io;
#ifdef CONFIG_HUGETLB_PAGE
extern unsigned int *slb_miss_user_load_huge;
unsigned long huge_llp;
huge_llp = mmu_psize_defs[mmu_huge_psize].sllp;
#endif
/* Prepare our SLB miss handler based on our page size */ /* Prepare our SLB miss handler based on our page size */
linear_llp = mmu_psize_defs[mmu_linear_psize].sllp; linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
...@@ -220,11 +214,6 @@ void slb_initialize(void) ...@@ -220,11 +214,6 @@ void slb_initialize(void)
DBG("SLB: linear LLP = %04x\n", linear_llp); DBG("SLB: linear LLP = %04x\n", linear_llp);
DBG("SLB: io LLP = %04x\n", io_llp); DBG("SLB: io LLP = %04x\n", io_llp);
#ifdef CONFIG_HUGETLB_PAGE
patch_slb_encoding(slb_miss_user_load_huge,
SLB_VSID_USER | huge_llp);
DBG("SLB: huge LLP = %04x\n", huge_llp);
#endif
} }
get_paca()->stab_rr = SLB_NUM_BOLTED; get_paca()->stab_rr = SLB_NUM_BOLTED;
......
...@@ -82,31 +82,45 @@ _GLOBAL(slb_miss_kernel_load_io) ...@@ -82,31 +82,45 @@ _GLOBAL(slb_miss_kernel_load_io)
srdi. r9,r10,USER_ESID_BITS srdi. r9,r10,USER_ESID_BITS
bne- 8f /* invalid ea bits set */ bne- 8f /* invalid ea bits set */
/* Figure out if the segment contains huge pages */
#ifdef CONFIG_HUGETLB_PAGE /* when using slices, we extract the psize off the slice bitmaps
BEGIN_FTR_SECTION * and then we need to get the sllp encoding off the mmu_psize_defs
b 1f * array.
END_FTR_SECTION_IFCLR(CPU_FTR_16M_PAGE) *
* XXX This is a bit inefficient especially for the normal case,
* so we should try to implement a fast path for the standard page
* size using the old sllp value so we avoid the array. We cannot
* really do dynamic patching unfortunately as processes might flip
* between 4k and 64k standard page size
*/
#ifdef CONFIG_PPC_MM_SLICES
cmpldi r10,16 cmpldi r10,16
lhz r9,PACALOWHTLBAREAS(r13) /* Get the slice index * 4 in r11 and matching slice size mask in r9 */
mr r11,r10 ld r9,PACALOWSLICESPSIZE(r13)
sldi r11,r10,2
blt 5f blt 5f
ld r9,PACAHIGHSLICEPSIZE(r13)
srdi r11,r10,(SLICE_HIGH_SHIFT - SLICE_LOW_SHIFT - 2)
andi. r11,r11,0x3c
lhz r9,PACAHIGHHTLBAREAS(r13) 5: /* Extract the psize and multiply to get an array offset */
srdi r11,r10,(HTLB_AREA_SHIFT-SID_SHIFT) srd r9,r9,r11
andi. r9,r9,0xf
5: srd r9,r9,r11 mulli r9,r9,MMUPSIZEDEFSIZE
andi. r9,r9,1
beq 1f
_GLOBAL(slb_miss_user_load_huge)
li r11,0
b 2f
1:
#endif /* CONFIG_HUGETLB_PAGE */
/* Now get to the array and obtain the sllp
*/
ld r11,PACATOC(r13)
ld r11,mmu_psize_defs@got(r11)
add r11,r11,r9
ld r11,MMUPSIZESLLP(r11)
ori r11,r11,SLB_VSID_USER
#else
/* paca context sllp already contains the SLB_VSID_USER bits */
lhz r11,PACACONTEXTSLLP(r13) lhz r11,PACACONTEXTSLLP(r13)
2: #endif /* CONFIG_PPC_MM_SLICES */
ld r9,PACACONTEXTID(r13) ld r9,PACACONTEXTID(r13)
rldimi r10,r9,USER_ESID_BITS,0 rldimi r10,r9,USER_ESID_BITS,0
b slb_finish_load b slb_finish_load
......
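For readers who do not want to decode the SLB miss assembly above, the slice path boils down to: index the packed per-context page-size bitmaps with the slice number, then use the resulting page-size index to fetch the SLB encoding (sllp) from mmu_psize_defs[]. The C sketch below is illustrative only; user_segment_sllp() is a made-up name, and it leans on the slice macros and fields introduced elsewhere in this merge (SLICE_LOW_TOP, GET_LOW_SLICE_INDEX(), GET_HIGH_SLICE_INDEX(), low_slices_psize/high_slices_psize in the PACA's context copy):

	/* Illustrative C equivalent of the slb_miss_user slice lookup above */
	static unsigned long user_segment_sllp(unsigned long ea)
	{
		unsigned long psize;

		if (ea < SLICE_LOW_TOP)
			/* below 4GB: one 4-bit entry per 256MB slice */
			psize = (get_paca()->context.low_slices_psize
				 >> (GET_LOW_SLICE_INDEX(ea) * 4)) & 0xf;
		else
			/* above 4GB: one 4-bit entry per 1TB slice */
			psize = (get_paca()->context.high_slices_psize
				 >> (GET_HIGH_SLICE_INDEX(ea) * 4)) & 0xf;

		/* mmu_psize_defs[psize].sllp holds the SLB L/LP encoding */
		return mmu_psize_defs[psize].sllp | SLB_VSID_USER;
	}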
(diff collapsed in the original web view; file contents not shown)
...@@ -111,7 +111,7 @@ static void flush_range(struct mm_struct *mm, unsigned long start, ...@@ -111,7 +111,7 @@ static void flush_range(struct mm_struct *mm, unsigned long start,
if (start >= end) if (start >= end)
return; return;
end = (end - 1) | ~PAGE_MASK; end = (end - 1) | ~PAGE_MASK;
pmd = pmd_offset(pgd_offset(mm, start), start); pmd = pmd_offset(pud_offset(pgd_offset(mm, start), start), start);
for (;;) { for (;;) {
pmd_end = ((start + PGDIR_SIZE) & PGDIR_MASK) - 1; pmd_end = ((start + PGDIR_SIZE) & PGDIR_MASK) - 1;
if (pmd_end > end) if (pmd_end > end)
...@@ -169,7 +169,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr) ...@@ -169,7 +169,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
return; return;
} }
mm = (vmaddr < TASK_SIZE)? vma->vm_mm: &init_mm; mm = (vmaddr < TASK_SIZE)? vma->vm_mm: &init_mm;
pmd = pmd_offset(pgd_offset(mm, vmaddr), vmaddr); pmd = pmd_offset(pud_offset(pgd_offset(mm, vmaddr), vmaddr), vmaddr);
if (!pmd_none(*pmd)) if (!pmd_none(*pmd))
flush_hash_pages(mm->context.id, vmaddr, pmd_val(*pmd), 1); flush_hash_pages(mm->context.id, vmaddr, pmd_val(*pmd), 1);
FINISH_FLUSH; FINISH_FLUSH;
......
...@@ -143,16 +143,22 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr, ...@@ -143,16 +143,22 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
*/ */
addr &= PAGE_MASK; addr &= PAGE_MASK;
/* Get page size (maybe move back to caller) */ /* Get page size (maybe move back to caller).
*
* NOTE: when using special 64K mappings in 4K environment like
* for SPEs, we obtain the page size from the slice, which thus
* must still exist (and thus the VMA not reused) at the time
* of this call
*/
if (huge) { if (huge) {
#ifdef CONFIG_HUGETLB_PAGE #ifdef CONFIG_HUGETLB_PAGE
psize = mmu_huge_psize; psize = mmu_huge_psize;
#else #else
BUG(); BUG();
psize = pte_pagesize_index(pte); /* shutup gcc */ psize = pte_pagesize_index(mm, addr, pte); /* shutup gcc */
#endif #endif
} else } else
psize = pte_pagesize_index(pte); psize = pte_pagesize_index(mm, addr, pte);
/* Build full vaddr */ /* Build full vaddr */
if (!is_kernel_addr(addr)) { if (!is_kernel_addr(addr)) {
......
...@@ -15,8 +15,8 @@ ...@@ -15,8 +15,8 @@
#include <linux/init.h> #include <linux/init.h>
#include <linux/delay.h> #include <linux/delay.h>
#include <asm/pgtable.h>
#include <asm/page.h> #include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/pci-bridge.h> #include <asm/pci-bridge.h>
#include <asm-powerpc/mpic.h> #include <asm-powerpc/mpic.h>
#include <asm/mpc86xx.h> #include <asm/mpc86xx.h>
......
...@@ -35,6 +35,21 @@ config SPU_FS ...@@ -35,6 +35,21 @@ config SPU_FS
Units on machines implementing the Broadband Processor Units on machines implementing the Broadband Processor
Architecture. Architecture.
config SPU_FS_64K_LS
bool "Use 64K pages to map SPE local store"
# we depend on PPC_MM_SLICES for now rather than selecting
# it because we depend on hugetlbfs hooks being present. We
# will fix that when the generic code has been improved to
# not require hijacking hugetlbfs hooks.
depends on SPU_FS && PPC_MM_SLICES && !PPC_64K_PAGES
default y
select PPC_HAS_HASH_64K
help
This option causes SPE local stores to be mapped in process
address spaces using 64K pages while the rest of the kernel
uses 4K pages. This can improve performance of applications
using multiple SPEs by lowering the TLB pressure on them.
config SPU_BASE config SPU_BASE
bool bool
default n default n
......
...@@ -144,12 +144,11 @@ static int __spu_trap_data_seg(struct spu *spu, unsigned long ea) ...@@ -144,12 +144,11 @@ static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
switch(REGION_ID(ea)) { switch(REGION_ID(ea)) {
case USER_REGION_ID: case USER_REGION_ID:
#ifdef CONFIG_HUGETLB_PAGE #ifdef CONFIG_PPC_MM_SLICES
if (in_hugepage_area(mm->context, ea)) psize = get_slice_psize(mm, ea);
psize = mmu_huge_psize; #else
else
#endif
psize = mm->context.user_psize; psize = mm->context.user_psize;
#endif
vsid = (get_vsid(mm->context.id, ea) << SLB_VSID_SHIFT) | vsid = (get_vsid(mm->context.id, ea) << SLB_VSID_SHIFT) |
SLB_VSID_USER; SLB_VSID_USER;
break; break;
......
obj-y += switch.o fault.o obj-y += switch.o fault.o lscsa_alloc.o
obj-$(CONFIG_SPU_FS) += spufs.o obj-$(CONFIG_SPU_FS) += spufs.o
spufs-y += inode.o file.o context.o syscalls.o coredump.o spufs-y += inode.o file.o context.o syscalls.o coredump.o
......
...@@ -36,10 +36,8 @@ struct spu_context *alloc_spu_context(struct spu_gang *gang) ...@@ -36,10 +36,8 @@ struct spu_context *alloc_spu_context(struct spu_gang *gang)
/* Binding to physical processor deferred /* Binding to physical processor deferred
* until spu_activate(). * until spu_activate().
*/ */
spu_init_csa(&ctx->csa); if (spu_init_csa(&ctx->csa))
if (!ctx->csa.lscsa) {
goto out_free; goto out_free;
}
spin_lock_init(&ctx->mmio_lock); spin_lock_init(&ctx->mmio_lock);
spin_lock_init(&ctx->mapping_lock); spin_lock_init(&ctx->mapping_lock);
kref_init(&ctx->kref); kref_init(&ctx->kref);
......
...@@ -119,13 +119,31 @@ static unsigned long spufs_mem_mmap_nopfn(struct vm_area_struct *vma, ...@@ -119,13 +119,31 @@ static unsigned long spufs_mem_mmap_nopfn(struct vm_area_struct *vma,
unsigned long address) unsigned long address)
{ {
struct spu_context *ctx = vma->vm_file->private_data; struct spu_context *ctx = vma->vm_file->private_data;
unsigned long pfn, offset = address - vma->vm_start; unsigned long pfn, offset, addr0 = address;
#ifdef CONFIG_SPU_FS_64K_LS
struct spu_state *csa = &ctx->csa;
int psize;
offset += vma->vm_pgoff << PAGE_SHIFT; /* Check what page size we are using */
psize = get_slice_psize(vma->vm_mm, address);
/* Some sanity checking */
BUG_ON(csa->use_big_pages != (psize == MMU_PAGE_64K));
/* Wow, 64K, cool, we need to align the address though */
if (csa->use_big_pages) {
BUG_ON(vma->vm_start & 0xffff);
address &= ~0xfffful;
}
#endif /* CONFIG_SPU_FS_64K_LS */
offset = (address - vma->vm_start) + (vma->vm_pgoff << PAGE_SHIFT);
if (offset >= LS_SIZE) if (offset >= LS_SIZE)
return NOPFN_SIGBUS; return NOPFN_SIGBUS;
pr_debug("spufs_mem_mmap_nopfn address=0x%lx -> 0x%lx, offset=0x%lx\n",
addr0, address, offset);
spu_acquire(ctx); spu_acquire(ctx);
if (ctx->state == SPU_STATE_SAVED) { if (ctx->state == SPU_STATE_SAVED) {
...@@ -149,9 +167,24 @@ static struct vm_operations_struct spufs_mem_mmap_vmops = { ...@@ -149,9 +167,24 @@ static struct vm_operations_struct spufs_mem_mmap_vmops = {
.nopfn = spufs_mem_mmap_nopfn, .nopfn = spufs_mem_mmap_nopfn,
}; };
static int static int spufs_mem_mmap(struct file *file, struct vm_area_struct *vma)
spufs_mem_mmap(struct file *file, struct vm_area_struct *vma)
{ {
#ifdef CONFIG_SPU_FS_64K_LS
struct spu_context *ctx = file->private_data;
struct spu_state *csa = &ctx->csa;
/* Sanity check VMA alignment */
if (csa->use_big_pages) {
pr_debug("spufs_mem_mmap 64K, start=0x%lx, end=0x%lx,"
" pgoff=0x%lx\n", vma->vm_start, vma->vm_end,
vma->vm_pgoff);
if (vma->vm_start & 0xffff)
return -EINVAL;
if (vma->vm_pgoff & 0xf)
return -EINVAL;
}
#endif /* CONFIG_SPU_FS_64K_LS */
if (!(vma->vm_flags & VM_SHARED)) if (!(vma->vm_flags & VM_SHARED))
return -EINVAL; return -EINVAL;
...@@ -163,13 +196,34 @@ spufs_mem_mmap(struct file *file, struct vm_area_struct *vma) ...@@ -163,13 +196,34 @@ spufs_mem_mmap(struct file *file, struct vm_area_struct *vma)
return 0; return 0;
} }
#ifdef CONFIG_SPU_FS_64K_LS
unsigned long spufs_get_unmapped_area(struct file *file, unsigned long addr,
unsigned long len, unsigned long pgoff,
unsigned long flags)
{
struct spu_context *ctx = file->private_data;
struct spu_state *csa = &ctx->csa;
/* If not using big pages, fallback to normal MM g_u_a */
if (!csa->use_big_pages)
return current->mm->get_unmapped_area(file, addr, len,
pgoff, flags);
/* Else, try to obtain a 64K pages slice */
return slice_get_unmapped_area(addr, len, flags,
MMU_PAGE_64K, 1, 0);
}
#endif /* CONFIG_SPU_FS_64K_LS */
static const struct file_operations spufs_mem_fops = { static const struct file_operations spufs_mem_fops = {
.open = spufs_mem_open, .open = spufs_mem_open,
.release = spufs_mem_release,
.read = spufs_mem_read, .read = spufs_mem_read,
.write = spufs_mem_write, .write = spufs_mem_write,
.llseek = generic_file_llseek, .llseek = generic_file_llseek,
.mmap = spufs_mem_mmap, .mmap = spufs_mem_mmap,
#ifdef CONFIG_SPU_FS_64K_LS
.get_unmapped_area = spufs_get_unmapped_area,
#endif
}; };
static unsigned long spufs_ps_nopfn(struct vm_area_struct *vma, static unsigned long spufs_ps_nopfn(struct vm_area_struct *vma,
......
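For context, the user-visible effect of the new .get_unmapped_area hook is that an ordinary mmap() of the spufs mem file lands on a 64K slice when the context uses big pages; the mapping must be MAP_SHARED and 64K aligned, which is exactly what spufs_mem_mmap() checks above. A minimal user-space sketch follows (the context path is hypothetical, and the 256KB local store size is assumed):

	#include <fcntl.h>
	#include <sys/mman.h>

	static void *map_local_store(void)
	{
		/* "/spu/ctx/mem" stands in for a real spufs context directory */
		int fd = open("/spu/ctx/mem", O_RDWR);
		if (fd < 0)
			return MAP_FAILED;
		/* MAP_SHARED is required; spufs_mem_mmap() rejects private mappings */
		return mmap(NULL, 256 * 1024, PROT_READ | PROT_WRITE,
			    MAP_SHARED, fd, 0);
	}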
/*
* SPU local store allocation routines
*
* Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#undef DEBUG
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <asm/spu.h>
#include <asm/spu_csa.h>
#include <asm/mmu.h>
static int spu_alloc_lscsa_std(struct spu_state *csa)
{
struct spu_lscsa *lscsa;
unsigned char *p;
lscsa = vmalloc(sizeof(struct spu_lscsa));
if (!lscsa)
return -ENOMEM;
memset(lscsa, 0, sizeof(struct spu_lscsa));
csa->lscsa = lscsa;
/* Set LS pages reserved to allow for user-space mapping. */
for (p = lscsa->ls; p < lscsa->ls + LS_SIZE; p += PAGE_SIZE)
SetPageReserved(vmalloc_to_page(p));
return 0;
}
static void spu_free_lscsa_std(struct spu_state *csa)
{
/* Clear reserved bit before vfree. */
unsigned char *p;
if (csa->lscsa == NULL)
return;
for (p = csa->lscsa->ls; p < csa->lscsa->ls + LS_SIZE; p += PAGE_SIZE)
ClearPageReserved(vmalloc_to_page(p));
vfree(csa->lscsa);
}
#ifdef CONFIG_SPU_FS_64K_LS
#define SPU_64K_PAGE_SHIFT 16
#define SPU_64K_PAGE_ORDER (SPU_64K_PAGE_SHIFT - PAGE_SHIFT)
#define SPU_64K_PAGE_COUNT (1ul << SPU_64K_PAGE_ORDER)
int spu_alloc_lscsa(struct spu_state *csa)
{
struct page **pgarray;
unsigned char *p;
int i, j, n_4k;
/* Check availability of 64K pages */
if (mmu_psize_defs[MMU_PAGE_64K].shift == 0)
goto fail;
csa->use_big_pages = 1;
pr_debug("spu_alloc_lscsa(csa=0x%p), trying to allocate 64K pages\n",
csa);
/* First try to allocate our 64K pages. We need 5 of them
* with the current implementation. In the future, we should try
* to separate the lscsa with the actual local store image, thus
* allowing us to require only 4 64K pages per context
*/
for (i = 0; i < SPU_LSCSA_NUM_BIG_PAGES; i++) {
/* XXX This is likely to fail, we should use a special pool
* similar to what hugetlbfs does.
*/
csa->lscsa_pages[i] = alloc_pages(GFP_KERNEL,
SPU_64K_PAGE_ORDER);
if (csa->lscsa_pages[i] == NULL)
goto fail;
}
pr_debug(" success ! creating vmap...\n");
/* Now we need to create a vmalloc mapping of these for the kernel
* and SPU context switch code to use. Currently, we stick to a
* normal kernel vmalloc mapping, which in our case will be 4K
*/
n_4k = SPU_64K_PAGE_COUNT * SPU_LSCSA_NUM_BIG_PAGES;
pgarray = kmalloc(sizeof(struct page *) * n_4k, GFP_KERNEL);
if (pgarray == NULL)
goto fail;
for (i = 0; i < SPU_LSCSA_NUM_BIG_PAGES; i++)
for (j = 0; j < SPU_64K_PAGE_COUNT; j++)
/* We assume all the struct pages are contiguous,
* which should hopefully be the case for an order-4
* allocation.
*/
pgarray[i * SPU_64K_PAGE_COUNT + j] =
csa->lscsa_pages[i] + j;
csa->lscsa = vmap(pgarray, n_4k, VM_USERMAP, PAGE_KERNEL);
kfree(pgarray);
if (csa->lscsa == NULL)
goto fail;
memset(csa->lscsa, 0, sizeof(struct spu_lscsa));
/* Set LS pages reserved to allow for user-space mapping.
*
* XXX isn't that a bit obsolete ? I think we should just
* make sure the page count is high enough. Anyway, won't harm
* for now
*/
for (p = csa->lscsa->ls; p < csa->lscsa->ls + LS_SIZE; p += PAGE_SIZE)
SetPageReserved(vmalloc_to_page(p));
pr_debug(" all good !\n");
return 0;
fail:
pr_debug("spufs: failed to allocate lscsa 64K pages, falling back\n");
spu_free_lscsa(csa);
return spu_alloc_lscsa_std(csa);
}
void spu_free_lscsa(struct spu_state *csa)
{
unsigned char *p;
int i;
if (!csa->use_big_pages) {
spu_free_lscsa_std(csa);
return;
}
csa->use_big_pages = 0;
if (csa->lscsa == NULL)
goto free_pages;
for (p = csa->lscsa->ls; p < csa->lscsa->ls + LS_SIZE; p += PAGE_SIZE)
ClearPageReserved(vmalloc_to_page(p));
vunmap(csa->lscsa);
csa->lscsa = NULL;
free_pages:
for (i = 0; i < SPU_LSCSA_NUM_BIG_PAGES; i++)
if (csa->lscsa_pages[i])
__free_pages(csa->lscsa_pages[i], SPU_64K_PAGE_ORDER);
}
#else /* CONFIG_SPU_FS_64K_LS */
int spu_alloc_lscsa(struct spu_state *csa)
{
return spu_alloc_lscsa_std(csa);
}
void spu_free_lscsa(struct spu_state *csa)
{
spu_free_lscsa_std(csa);
}
#endif /* !defined(CONFIG_SPU_FS_64K_LS) */
...@@ -2188,40 +2188,30 @@ static void init_priv2(struct spu_state *csa) ...@@ -2188,40 +2188,30 @@ static void init_priv2(struct spu_state *csa)
* as it is by far the largest of the context save regions, * as it is by far the largest of the context save regions,
* and may need to be pinned or otherwise specially aligned. * and may need to be pinned or otherwise specially aligned.
*/ */
void spu_init_csa(struct spu_state *csa) int spu_init_csa(struct spu_state *csa)
{ {
struct spu_lscsa *lscsa; int rc;
unsigned char *p;
if (!csa) if (!csa)
return; return -EINVAL;
memset(csa, 0, sizeof(struct spu_state)); memset(csa, 0, sizeof(struct spu_state));
lscsa = vmalloc(sizeof(struct spu_lscsa)); rc = spu_alloc_lscsa(csa);
if (!lscsa) if (rc)
return; return rc;
memset(lscsa, 0, sizeof(struct spu_lscsa));
csa->lscsa = lscsa;
spin_lock_init(&csa->register_lock); spin_lock_init(&csa->register_lock);
/* Set LS pages reserved to allow for user-space mapping. */
for (p = lscsa->ls; p < lscsa->ls + LS_SIZE; p += PAGE_SIZE)
SetPageReserved(vmalloc_to_page(p));
init_prob(csa); init_prob(csa);
init_priv1(csa); init_priv1(csa);
init_priv2(csa); init_priv2(csa);
return 0;
} }
EXPORT_SYMBOL_GPL(spu_init_csa); EXPORT_SYMBOL_GPL(spu_init_csa);
void spu_fini_csa(struct spu_state *csa) void spu_fini_csa(struct spu_state *csa)
{ {
/* Clear reserved bit before vfree. */ spu_free_lscsa(csa);
unsigned char *p;
for (p = csa->lscsa->ls; p < csa->lscsa->ls + LS_SIZE; p += PAGE_SIZE)
ClearPageReserved(vmalloc_to_page(p));
vfree(csa->lscsa);
} }
EXPORT_SYMBOL_GPL(spu_fini_csa); EXPORT_SYMBOL_GPL(spu_fini_csa);
...@@ -7,7 +7,9 @@ menu "iSeries device drivers" ...@@ -7,7 +7,9 @@ menu "iSeries device drivers"
depends on PPC_ISERIES depends on PPC_ISERIES
config VIOCONS config VIOCONS
tristate "iSeries Virtual Console Support (Obsolete)" bool "iSeries Virtual Console Support (Obsolete)"
depends on !HVC_ISERIES
default n
help help
This is the old virtual console driver for legacy iSeries. This is the old virtual console driver for legacy iSeries.
You should use the iSeries Hypervisor Virtual Console You should use the iSeries Hypervisor Virtual Console
......
...@@ -100,6 +100,9 @@ static unsigned char slot_errbuf[RTAS_ERROR_LOG_MAX]; ...@@ -100,6 +100,9 @@ static unsigned char slot_errbuf[RTAS_ERROR_LOG_MAX];
static DEFINE_SPINLOCK(slot_errbuf_lock); static DEFINE_SPINLOCK(slot_errbuf_lock);
static int eeh_error_buf_size; static int eeh_error_buf_size;
#define EEH_PCI_REGS_LOG_LEN 4096
static unsigned char pci_regs_buf[EEH_PCI_REGS_LOG_LEN];
/* System monitoring statistics */ /* System monitoring statistics */
static unsigned long no_device; static unsigned long no_device;
static unsigned long no_dn; static unsigned long no_dn;
...@@ -115,7 +118,8 @@ static unsigned long slot_resets; ...@@ -115,7 +118,8 @@ static unsigned long slot_resets;
/* --------------------------------------------------------------- */ /* --------------------------------------------------------------- */
/* Below lies the EEH event infrastructure */ /* Below lies the EEH event infrastructure */
void eeh_slot_error_detail (struct pci_dn *pdn, int severity) static void rtas_slot_error_detail(struct pci_dn *pdn, int severity,
char *driver_log, size_t loglen)
{ {
int config_addr; int config_addr;
unsigned long flags; unsigned long flags;
...@@ -133,7 +137,8 @@ void eeh_slot_error_detail (struct pci_dn *pdn, int severity) ...@@ -133,7 +137,8 @@ void eeh_slot_error_detail (struct pci_dn *pdn, int severity)
rc = rtas_call(ibm_slot_error_detail, rc = rtas_call(ibm_slot_error_detail,
8, 1, NULL, config_addr, 8, 1, NULL, config_addr,
BUID_HI(pdn->phb->buid), BUID_HI(pdn->phb->buid),
BUID_LO(pdn->phb->buid), NULL, 0, BUID_LO(pdn->phb->buid),
virt_to_phys(driver_log), loglen,
virt_to_phys(slot_errbuf), virt_to_phys(slot_errbuf),
eeh_error_buf_size, eeh_error_buf_size,
severity); severity);
...@@ -143,6 +148,84 @@ void eeh_slot_error_detail (struct pci_dn *pdn, int severity) ...@@ -143,6 +148,84 @@ void eeh_slot_error_detail (struct pci_dn *pdn, int severity)
spin_unlock_irqrestore(&slot_errbuf_lock, flags); spin_unlock_irqrestore(&slot_errbuf_lock, flags);
} }
/**
* gather_pci_data - copy assorted PCI config space registers to buff
* @pdn: device to report data for
* @buf: pointer to the buffer in which to log
* @len: amount of room in buffer
*
* This routine captures assorted PCI configuration space data,
* and puts them into a buffer for RTAS error logging.
*/
static size_t gather_pci_data(struct pci_dn *pdn, char * buf, size_t len)
{
u32 cfg;
int cap, i;
int n = 0;
n += scnprintf(buf+n, len-n, "%s\n", pdn->node->full_name);
printk(KERN_WARNING "EEH: of node=%s\n", pdn->node->full_name);
rtas_read_config(pdn, PCI_VENDOR_ID, 4, &cfg);
n += scnprintf(buf+n, len-n, "dev/vend:%08x\n", cfg);
printk(KERN_WARNING "EEH: PCI device/vendor: %08x\n", cfg);
rtas_read_config(pdn, PCI_COMMAND, 4, &cfg);
n += scnprintf(buf+n, len-n, "cmd/stat:%x\n", cfg);
printk(KERN_WARNING "EEH: PCI cmd/status register: %08x\n", cfg);
/* Dump out the PCI-X command and status regs */
cap = pci_find_capability(pdn->pcidev, PCI_CAP_ID_PCIX);
if (cap) {
rtas_read_config(pdn, cap, 4, &cfg);
n += scnprintf(buf+n, len-n, "pcix-cmd:%x\n", cfg);
printk(KERN_WARNING "EEH: PCI-X cmd: %08x\n", cfg);
rtas_read_config(pdn, cap+4, 4, &cfg);
n += scnprintf(buf+n, len-n, "pcix-stat:%x\n", cfg);
printk(KERN_WARNING "EEH: PCI-X status: %08x\n", cfg);
}
/* If PCI-E capable, dump PCI-E cap 10, and the AER */
cap = pci_find_capability(pdn->pcidev, PCI_CAP_ID_EXP);
if (cap) {
n += scnprintf(buf+n, len-n, "pci-e cap10:\n");
printk(KERN_WARNING
"EEH: PCI-E capabilities and status follow:\n");
for (i=0; i<=8; i++) {
rtas_read_config(pdn, cap+4*i, 4, &cfg);
n += scnprintf(buf+n, len-n, "%02x:%x\n", 4*i, cfg);
printk(KERN_WARNING "EEH: PCI-E %02x: %08x\n", i, cfg);
}
cap = pci_find_ext_capability(pdn->pcidev,PCI_EXT_CAP_ID_ERR);
if (cap) {
n += scnprintf(buf+n, len-n, "pci-e AER:\n");
printk(KERN_WARNING
"EEH: PCI-E AER capability register set follows:\n");
for (i=0; i<14; i++) {
rtas_read_config(pdn, cap+4*i, 4, &cfg);
n += scnprintf(buf+n, len-n, "%02x:%x\n", 4*i, cfg);
printk(KERN_WARNING "EEH: PCI-E AER %02x: %08x\n", i, cfg);
}
}
}
return n;
}
void eeh_slot_error_detail(struct pci_dn *pdn, int severity)
{
size_t loglen = 0;
memset(pci_regs_buf, 0, EEH_PCI_REGS_LOG_LEN);
rtas_pci_enable(pdn, EEH_THAW_MMIO);
loglen = gather_pci_data(pdn, pci_regs_buf, EEH_PCI_REGS_LOG_LEN);
rtas_slot_error_detail(pdn, severity, pci_regs_buf, loglen);
}
/** /**
* read_slot_reset_state - Read the reset state of a device node's slot * read_slot_reset_state - Read the reset state of a device node's slot
* @dn: device node to read * @dn: device node to read
......
...@@ -361,11 +361,12 @@ struct pci_dn * handle_eeh_events (struct eeh_event *event) ...@@ -361,11 +361,12 @@ struct pci_dn * handle_eeh_events (struct eeh_event *event)
goto hard_fail; goto hard_fail;
} }
eeh_slot_error_detail(frozen_pdn, 1 /* Temporary Error */);
printk(KERN_WARNING printk(KERN_WARNING
"EEH: This PCI device has failed %d times since last reboot: " "EEH: This PCI device has failed %d times in the last hour:\n",
"location=%s driver=%s pci addr=%s\n", frozen_pdn->eeh_freeze_count);
frozen_pdn->eeh_freeze_count, location, drv_str, pci_str); printk(KERN_WARNING
"EEH: location=%s driver=%s pci addr=%s\n",
location, drv_str, pci_str);
/* Walk the various device drivers attached to this slot through /* Walk the various device drivers attached to this slot through
* a reset sequence, giving each an opportunity to do what it needs * a reset sequence, giving each an opportunity to do what it needs
...@@ -375,6 +376,11 @@ struct pci_dn * handle_eeh_events (struct eeh_event *event) ...@@ -375,6 +376,11 @@ struct pci_dn * handle_eeh_events (struct eeh_event *event)
*/ */
pci_walk_bus(frozen_bus, eeh_report_error, &result); pci_walk_bus(frozen_bus, eeh_report_error, &result);
/* Since rtas may enable MMIO when posting the error log,
* don't post the error log until after all dev drivers
* have been informed. */
eeh_slot_error_detail(frozen_pdn, 1 /* Temporary Error */);
/* If all device drivers were EEH-unaware, then shut /* If all device drivers were EEH-unaware, then shut
* down all of the device drivers, and hope they * down all of the device drivers, and hope they
* go down willingly, without panicing the system. * go down willingly, without panicing the system.
......
...@@ -907,7 +907,7 @@ static int __init fs_enet_of_init(void) ...@@ -907,7 +907,7 @@ static int __init fs_enet_of_init(void)
struct fs_platform_info fs_enet_data; struct fs_platform_info fs_enet_data;
const unsigned int *id; const unsigned int *id;
const unsigned int *phy_addr; const unsigned int *phy_addr;
void *mac_addr; const void *mac_addr;
const phandle *ph; const phandle *ph;
const char *model; const char *model;
......
...@@ -631,7 +631,8 @@ config HVC_CONSOLE ...@@ -631,7 +631,8 @@ config HVC_CONSOLE
config HVC_ISERIES config HVC_ISERIES
bool "iSeries Hypervisor Virtual Console support" bool "iSeries Hypervisor Virtual Console support"
depends on PPC_ISERIES && !VIOCONS depends on PPC_ISERIES
default y
select HVC_DRIVER select HVC_DRIVER
help help
iSeries machines support a hypervisor virtual console. iSeries machines support a hypervisor virtual console.
......
...@@ -351,9 +351,12 @@ typedef unsigned long mm_context_id_t; ...@@ -351,9 +351,12 @@ typedef unsigned long mm_context_id_t;
typedef struct { typedef struct {
mm_context_id_t id; mm_context_id_t id;
u16 user_psize; /* page size index */ u16 user_psize; /* page size index */
u16 sllp; /* SLB entry page size encoding */
#ifdef CONFIG_HUGETLB_PAGE #ifdef CONFIG_PPC_MM_SLICES
u16 low_htlb_areas, high_htlb_areas; u64 low_slices_psize; /* SLB page size encodings */
u64 high_slices_psize; /* 4 bits per slice for now */
#else
u16 sllp; /* SLB page size encoding */
#endif #endif
unsigned long vdso_base; unsigned long vdso_base;
} mm_context_t; } mm_context_t;
......
...@@ -83,8 +83,8 @@ struct paca_struct { ...@@ -83,8 +83,8 @@ struct paca_struct {
mm_context_t context; mm_context_t context;
u16 vmalloc_sllp; u16 vmalloc_sllp;
u16 slb_cache[SLB_CACHE_ENTRIES];
u16 slb_cache_ptr; u16 slb_cache_ptr;
u16 slb_cache[SLB_CACHE_ENTRIES];
/* /*
* then miscellaneous read-write fields * then miscellaneous read-write fields
......
...@@ -88,57 +88,55 @@ extern unsigned int HPAGE_SHIFT; ...@@ -88,57 +88,55 @@ extern unsigned int HPAGE_SHIFT;
#endif /* __ASSEMBLY__ */ #endif /* __ASSEMBLY__ */
#ifdef CONFIG_HUGETLB_PAGE #ifdef CONFIG_PPC_MM_SLICES
#define HTLB_AREA_SHIFT 40 #define SLICE_LOW_SHIFT 28
#define HTLB_AREA_SIZE (1UL << HTLB_AREA_SHIFT) #define SLICE_HIGH_SHIFT 40
#define GET_HTLB_AREA(x) ((x) >> HTLB_AREA_SHIFT)
#define LOW_ESID_MASK(addr, len) \ #define SLICE_LOW_TOP (0x100000000ul)
(((1U << (GET_ESID(min((addr)+(len)-1, 0x100000000UL))+1)) \ #define SLICE_NUM_LOW (SLICE_LOW_TOP >> SLICE_LOW_SHIFT)
- (1U << GET_ESID(min((addr), 0x100000000UL)))) & 0xffff) #define SLICE_NUM_HIGH (PGTABLE_RANGE >> SLICE_HIGH_SHIFT)
#define HTLB_AREA_MASK(addr, len) (((1U << (GET_HTLB_AREA(addr+len-1)+1)) \
- (1U << GET_HTLB_AREA(addr))) & 0xffff)
#define ARCH_HAS_HUGEPAGE_ONLY_RANGE #define GET_LOW_SLICE_INDEX(addr) ((addr) >> SLICE_LOW_SHIFT)
#define ARCH_HAS_HUGETLB_FREE_PGD_RANGE #define GET_HIGH_SLICE_INDEX(addr) ((addr) >> SLICE_HIGH_SHIFT)
#define ARCH_HAS_PREPARE_HUGEPAGE_RANGE
#define ARCH_HAS_SETCLEAR_HUGE_PTE
#define touches_hugepage_low_range(mm, addr, len) \ #ifndef __ASSEMBLY__
(((addr) < 0x100000000UL) \
&& (LOW_ESID_MASK((addr), (len)) & (mm)->context.low_htlb_areas)) struct slice_mask {
#define touches_hugepage_high_range(mm, addr, len) \ u16 low_slices;
((((addr) + (len)) > 0x100000000UL) \ u16 high_slices;
&& (HTLB_AREA_MASK((addr), (len)) & (mm)->context.high_htlb_areas)) };
#define __within_hugepage_low_range(addr, len, segmask) \ struct mm_struct;
( (((addr)+(len)) <= 0x100000000UL) \
&& ((LOW_ESID_MASK((addr), (len)) | (segmask)) == (segmask)))
#define within_hugepage_low_range(addr, len) \
__within_hugepage_low_range((addr), (len), \
current->mm->context.low_htlb_areas)
#define __within_hugepage_high_range(addr, len, zonemask) \
( ((addr) >= 0x100000000UL) \
&& ((HTLB_AREA_MASK((addr), (len)) | (zonemask)) == (zonemask)))
#define within_hugepage_high_range(addr, len) \
__within_hugepage_high_range((addr), (len), \
current->mm->context.high_htlb_areas)
#define is_hugepage_only_range(mm, addr, len) \
(touches_hugepage_high_range((mm), (addr), (len)) || \
touches_hugepage_low_range((mm), (addr), (len)))
#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
#define in_hugepage_area(context, addr) \ extern unsigned long slice_get_unmapped_area(unsigned long addr,
(cpu_has_feature(CPU_FTR_16M_PAGE) && \ unsigned long len,
( ( (addr) >= 0x100000000UL) \ unsigned long flags,
? ((1 << GET_HTLB_AREA(addr)) & (context).high_htlb_areas) \ unsigned int psize,
: ((1 << GET_ESID(addr)) & (context).low_htlb_areas) ) ) int topdown,
int use_cache);
#else /* !CONFIG_HUGETLB_PAGE */ extern unsigned int get_slice_psize(struct mm_struct *mm,
unsigned long addr);
#define in_hugepage_area(mm, addr) 0 extern void slice_init_context(struct mm_struct *mm, unsigned int psize);
extern void slice_set_user_psize(struct mm_struct *mm, unsigned int psize);
#define ARCH_HAS_HUGEPAGE_ONLY_RANGE
extern int is_hugepage_only_range(struct mm_struct *m,
unsigned long addr,
unsigned long len);
#endif /* __ASSEMBLY__ */
#else
#define slice_init()
#endif /* CONFIG_PPC_MM_SLICES */
#ifdef CONFIG_HUGETLB_PAGE
#define ARCH_HAS_HUGETLB_FREE_PGD_RANGE
#define ARCH_HAS_SETCLEAR_HUGE_PTE
#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
#endif /* !CONFIG_HUGETLB_PAGE */ #endif /* !CONFIG_HUGETLB_PAGE */
......
...@@ -14,18 +14,11 @@ ...@@ -14,18 +14,11 @@
extern struct kmem_cache *pgtable_cache[]; extern struct kmem_cache *pgtable_cache[];
#ifdef CONFIG_PPC_64K_PAGES
#define PTE_CACHE_NUM 0
#define PMD_CACHE_NUM 1
#define PGD_CACHE_NUM 2
#define HUGEPTE_CACHE_NUM 3
#else
#define PTE_CACHE_NUM 0
#define PMD_CACHE_NUM 1
#define PUD_CACHE_NUM 1
#define PGD_CACHE_NUM 0 #define PGD_CACHE_NUM 0
#define PUD_CACHE_NUM 1
#define PMD_CACHE_NUM 1
#define HUGEPTE_CACHE_NUM 2 #define HUGEPTE_CACHE_NUM 2
#endif #define PTE_NONCACHE_NUM 3 /* from GFP rather than kmem_cache */
static inline pgd_t *pgd_alloc(struct mm_struct *mm) static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{ {
...@@ -91,8 +84,7 @@ static inline void pmd_free(pmd_t *pmd) ...@@ -91,8 +84,7 @@ static inline void pmd_free(pmd_t *pmd)
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
unsigned long address) unsigned long address)
{ {
return kmem_cache_alloc(pgtable_cache[PTE_CACHE_NUM], return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
GFP_KERNEL|__GFP_REPEAT);
} }
static inline struct page *pte_alloc_one(struct mm_struct *mm, static inline struct page *pte_alloc_one(struct mm_struct *mm,
...@@ -103,12 +95,12 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm, ...@@ -103,12 +95,12 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm,
static inline void pte_free_kernel(pte_t *pte) static inline void pte_free_kernel(pte_t *pte)
{ {
kmem_cache_free(pgtable_cache[PTE_CACHE_NUM], pte); free_page((unsigned long)pte);
} }
static inline void pte_free(struct page *ptepage) static inline void pte_free(struct page *ptepage)
{ {
pte_free_kernel(page_address(ptepage)); __free_page(ptepage);
} }
#define PGF_CACHENUM_MASK 0x3 #define PGF_CACHENUM_MASK 0x3
...@@ -130,6 +122,9 @@ static inline void pgtable_free(pgtable_free_t pgf) ...@@ -130,6 +122,9 @@ static inline void pgtable_free(pgtable_free_t pgf)
void *p = (void *)(pgf.val & ~PGF_CACHENUM_MASK); void *p = (void *)(pgf.val & ~PGF_CACHENUM_MASK);
int cachenum = pgf.val & PGF_CACHENUM_MASK; int cachenum = pgf.val & PGF_CACHENUM_MASK;
if (cachenum == PTE_NONCACHE_NUM)
free_page((unsigned long)p);
else
kmem_cache_free(pgtable_cache[cachenum], p); kmem_cache_free(pgtable_cache[cachenum], p);
} }
...@@ -137,7 +132,7 @@ extern void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf); ...@@ -137,7 +132,7 @@ extern void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf);
#define __pte_free_tlb(tlb, ptepage) \ #define __pte_free_tlb(tlb, ptepage) \
pgtable_free_tlb(tlb, pgtable_free_cache(page_address(ptepage), \ pgtable_free_tlb(tlb, pgtable_free_cache(page_address(ptepage), \
PTE_CACHE_NUM, PTE_TABLE_SIZE-1)) PTE_NONCACHE_NUM, PTE_TABLE_SIZE-1))
#define __pmd_free_tlb(tlb, pmd) \ #define __pmd_free_tlb(tlb, pmd) \
pgtable_free_tlb(tlb, pgtable_free_cache(pmd, \ pgtable_free_tlb(tlb, pgtable_free_cache(pmd, \
PMD_CACHE_NUM, PMD_TABLE_SIZE-1)) PMD_CACHE_NUM, PMD_TABLE_SIZE-1))
......
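The noteworthy trick in the hunk above is how pgtable_free() can tell a page-allocator PTE page from a kmem_cache object: page-table addresses are aligned far beyond 4 bytes, so the cache index rides in the two low bits of pgtable_free_t.val, and PTE_NONCACHE_NUM (3) now marks "came from __get_free_page()". A conceptual sketch, not the header's exact helper (pgtable_free_cache() itself is defined earlier in the file and not shown in this hunk):

	/* encode, roughly what __pte_free_tlb() ends up producing:
	 *	val = (unsigned long)page_address(ptepage) | PTE_NONCACHE_NUM;
	 *
	 * decode, as in pgtable_free() above:
	 *	p        = (void *)(val & ~PGF_CACHENUM_MASK);
	 *	cachenum = val & PGF_CACHENUM_MASK;
	 *	cachenum == PTE_NONCACHE_NUM ? free_page((unsigned long)p)
	 *				     : kmem_cache_free(pgtable_cache[cachenum], p);
	 */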
...@@ -80,7 +80,11 @@ ...@@ -80,7 +80,11 @@
#define pte_iterate_hashed_end() } while(0) #define pte_iterate_hashed_end() } while(0)
#define pte_pagesize_index(pte) MMU_PAGE_4K #ifdef CONFIG_PPC_HAS_HASH_64K
#define pte_pagesize_index(mm, addr, pte) get_slice_psize(mm, addr)
#else
#define pte_pagesize_index(mm, addr, pte) MMU_PAGE_4K
#endif
/* /*
* 4-level page tables related bits * 4-level page tables related bits
......
...@@ -35,6 +35,11 @@ ...@@ -35,6 +35,11 @@
#define _PAGE_HPTE_SUB0 0x08000000 /* combo only: first sub page */ #define _PAGE_HPTE_SUB0 0x08000000 /* combo only: first sub page */
#define _PAGE_COMBO 0x10000000 /* this is a combo 4k page */ #define _PAGE_COMBO 0x10000000 /* this is a combo 4k page */
#define _PAGE_4K_PFN 0x20000000 /* PFN is for a single 4k page */ #define _PAGE_4K_PFN 0x20000000 /* PFN is for a single 4k page */
/* Note the full page bits must be in the same location as for normal
* 4k pages as the same assembly will be used to insert 64K pages
* whether the kernel has CONFIG_PPC_64K_PAGES or not
*/
#define _PAGE_F_SECOND 0x00008000 /* full page: hidx bits */ #define _PAGE_F_SECOND 0x00008000 /* full page: hidx bits */
#define _PAGE_F_GIX 0x00007000 /* full page: hidx bits */ #define _PAGE_F_GIX 0x00007000 /* full page: hidx bits */
...@@ -88,7 +93,7 @@ ...@@ -88,7 +93,7 @@
#define pte_iterate_hashed_end() } while(0); } } while(0) #define pte_iterate_hashed_end() } while(0); } } while(0)
#define pte_pagesize_index(pte) \ #define pte_pagesize_index(mm, addr, pte) \
(((pte) & _PAGE_COMBO)? MMU_PAGE_4K: MMU_PAGE_64K) (((pte) & _PAGE_COMBO)? MMU_PAGE_4K: MMU_PAGE_64K)
#define remap_4k_pfn(vma, addr, pfn, prot) \ #define remap_4k_pfn(vma, addr, pfn, prot) \
......
...@@ -235,6 +235,12 @@ struct spu_priv2_collapsed { ...@@ -235,6 +235,12 @@ struct spu_priv2_collapsed {
*/ */
struct spu_state { struct spu_state {
struct spu_lscsa *lscsa; struct spu_lscsa *lscsa;
#ifdef CONFIG_SPU_FS_64K_LS
int use_big_pages;
/* One struct page per 64k page */
#define SPU_LSCSA_NUM_BIG_PAGES (sizeof(struct spu_lscsa) / 0x10000)
struct page *lscsa_pages[SPU_LSCSA_NUM_BIG_PAGES];
#endif
struct spu_problem_collapsed prob; struct spu_problem_collapsed prob;
struct spu_priv1_collapsed priv1; struct spu_priv1_collapsed priv1;
struct spu_priv2_collapsed priv2; struct spu_priv2_collapsed priv2;
...@@ -247,12 +253,14 @@ struct spu_state { ...@@ -247,12 +253,14 @@ struct spu_state {
spinlock_t register_lock; spinlock_t register_lock;
}; };
extern void spu_init_csa(struct spu_state *csa); extern int spu_init_csa(struct spu_state *csa);
extern void spu_fini_csa(struct spu_state *csa); extern void spu_fini_csa(struct spu_state *csa);
extern int spu_save(struct spu_state *prev, struct spu *spu); extern int spu_save(struct spu_state *prev, struct spu *spu);
extern int spu_restore(struct spu_state *new, struct spu *spu); extern int spu_restore(struct spu_state *new, struct spu *spu);
extern int spu_switch(struct spu_state *prev, struct spu_state *new, extern int spu_switch(struct spu_state *prev, struct spu_state *new,
struct spu *spu); struct spu *spu);
extern int spu_alloc_lscsa(struct spu_state *csa);
extern void spu_free_lscsa(struct spu_state *csa);
#endif /* !__SPU__ */ #endif /* !__SPU__ */
#endif /* __KERNEL__ */ #endif /* __KERNEL__ */
......
...@@ -52,7 +52,15 @@ struct hibernation_ops { ...@@ -52,7 +52,15 @@ struct hibernation_ops {
#if defined(CONFIG_PM) && defined(CONFIG_SOFTWARE_SUSPEND) #if defined(CONFIG_PM) && defined(CONFIG_SOFTWARE_SUSPEND)
/* kernel/power/snapshot.c */ /* kernel/power/snapshot.c */
extern void __init register_nosave_region(unsigned long, unsigned long); extern void __register_nosave_region(unsigned long b, unsigned long e, int km);
static inline void register_nosave_region(unsigned long b, unsigned long e)
{
__register_nosave_region(b, e, 0);
}
static inline void register_nosave_region_late(unsigned long b, unsigned long e)
{
__register_nosave_region(b, e, 1);
}
extern int swsusp_page_is_forbidden(struct page *); extern int swsusp_page_is_forbidden(struct page *);
extern void swsusp_set_page_free(struct page *); extern void swsusp_set_page_free(struct page *);
extern void swsusp_unset_page_free(struct page *); extern void swsusp_unset_page_free(struct page *);
...@@ -62,6 +70,7 @@ extern void hibernation_set_ops(struct hibernation_ops *ops); ...@@ -62,6 +70,7 @@ extern void hibernation_set_ops(struct hibernation_ops *ops);
extern int hibernate(void); extern int hibernate(void);
#else #else
static inline void register_nosave_region(unsigned long b, unsigned long e) {} static inline void register_nosave_region(unsigned long b, unsigned long e) {}
static inline void register_nosave_region_late(unsigned long b, unsigned long e) {}
static inline int swsusp_page_is_forbidden(struct page *p) { return 0; } static inline int swsusp_page_is_forbidden(struct page *p) { return 0; }
static inline void swsusp_set_page_free(struct page *p) {} static inline void swsusp_set_page_free(struct page *p) {}
static inline void swsusp_unset_page_free(struct page *p) {} static inline void swsusp_unset_page_free(struct page *p) {}
......
...@@ -607,7 +607,8 @@ static LIST_HEAD(nosave_regions); ...@@ -607,7 +607,8 @@ static LIST_HEAD(nosave_regions);
*/ */
void __init void __init
register_nosave_region(unsigned long start_pfn, unsigned long end_pfn) __register_nosave_region(unsigned long start_pfn, unsigned long end_pfn,
int use_kmalloc)
{ {
struct nosave_region *region; struct nosave_region *region;
...@@ -623,6 +624,11 @@ register_nosave_region(unsigned long start_pfn, unsigned long end_pfn) ...@@ -623,6 +624,11 @@ register_nosave_region(unsigned long start_pfn, unsigned long end_pfn)
goto Report; goto Report;
} }
} }
if (use_kmalloc) {
/* during init, this shouldn't fail */
region = kmalloc(sizeof(struct nosave_region), GFP_KERNEL);
BUG_ON(!region);
} else
/* This allocation cannot fail */ /* This allocation cannot fail */
region = alloc_bootmem_low(sizeof(struct nosave_region)); region = alloc_bootmem_low(sizeof(struct nosave_region));
region->start_pfn = start_pfn; region->start_pfn = start_pfn;
......
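The _late variant exists because register_nosave_region() takes its bookkeeping from the bootmem allocator, which is only available very early in boot; code that runs after bootmem has been retired goes through the wrapper that sets use_kmalloc instead. A minimal sketch of a hypothetical late caller (the initcall name and PFN range are invented for illustration):

	static int __init mark_my_hole_nosave(void)
	{
		/* bootmem is long gone by initcall time, so use the _late variant */
		register_nosave_region_late(0x40000, 0x80000);
		return 0;
	}
	core_initcall(mark_my_hole_nosave);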