Commit ef629cc5 authored by Aneesh Kumar K.V's avatar Aneesh Kumar K.V Committed by Michael Ellerman

powerpc/mm/hash: Reduce hash_mm_context size

Allocate subpage protect related variables only if we use the feature.
This helps reduce the size of the hash-related mm context struct by around 4K.

Before the patch
sizeof(struct hash_mm_context)  = 8288

After the patch
sizeof(struct hash_mm_context) = 4160
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent 70110186
...@@ -687,10 +687,8 @@ struct subpage_prot_table { ...@@ -687,10 +687,8 @@ struct subpage_prot_table {
#define SBP_L3_SHIFT (SBP_L2_SHIFT + SBP_L2_BITS) #define SBP_L3_SHIFT (SBP_L2_SHIFT + SBP_L2_BITS)
extern void subpage_prot_free(struct mm_struct *mm); extern void subpage_prot_free(struct mm_struct *mm);
extern void subpage_prot_init_new_context(struct mm_struct *mm);
#else #else
static inline void subpage_prot_free(struct mm_struct *mm) {} static inline void subpage_prot_free(struct mm_struct *mm) {}
static inline void subpage_prot_init_new_context(struct mm_struct *mm) { }
#endif /* CONFIG_PPC_SUBPAGE_PROT */ #endif /* CONFIG_PPC_SUBPAGE_PROT */
/* /*
...@@ -720,7 +718,7 @@ struct hash_mm_context { ...@@ -720,7 +718,7 @@ struct hash_mm_context {
#endif #endif
#ifdef CONFIG_PPC_SUBPAGE_PROT #ifdef CONFIG_PPC_SUBPAGE_PROT
struct subpage_prot_table spt; struct subpage_prot_table *spt;
#endif /* CONFIG_PPC_SUBPAGE_PROT */ #endif /* CONFIG_PPC_SUBPAGE_PROT */
}; };
......
...@@ -206,7 +206,7 @@ static inline struct slice_mask *mm_ctx_slice_mask_16g(mm_context_t *ctx) ...@@ -206,7 +206,7 @@ static inline struct slice_mask *mm_ctx_slice_mask_16g(mm_context_t *ctx)
#ifdef CONFIG_PPC_SUBPAGE_PROT #ifdef CONFIG_PPC_SUBPAGE_PROT
static inline struct subpage_prot_table *mm_ctx_subpage_prot(mm_context_t *ctx) static inline struct subpage_prot_table *mm_ctx_subpage_prot(mm_context_t *ctx)
{ {
return &ctx->hash_context->spt; return ctx->hash_context->spt;
} }
#endif #endif
......
...@@ -1150,6 +1150,9 @@ static int subpage_protection(struct mm_struct *mm, unsigned long ea) ...@@ -1150,6 +1150,9 @@ static int subpage_protection(struct mm_struct *mm, unsigned long ea)
u32 spp = 0; u32 spp = 0;
u32 **sbpm, *sbpp; u32 **sbpm, *sbpp;
if (!spt)
return 0;
if (ea >= spt->maxaddr) if (ea >= spt->maxaddr)
return 0; return 0;
if (ea < 0x100000000UL) { if (ea < 0x100000000UL) {
......
...@@ -63,7 +63,8 @@ static int hash__init_new_context(struct mm_struct *mm) ...@@ -63,7 +63,8 @@ static int hash__init_new_context(struct mm_struct *mm)
if (index < 0) if (index < 0)
return index; return index;
mm->context.hash_context = kmalloc(sizeof(struct hash_mm_context), GFP_KERNEL); mm->context.hash_context = kmalloc(sizeof(struct hash_mm_context),
GFP_KERNEL);
if (!mm->context.hash_context) { if (!mm->context.hash_context) {
ida_free(&mmu_context_ida, index); ida_free(&mmu_context_ida, index);
return -ENOMEM; return -ENOMEM;
...@@ -89,10 +90,20 @@ static int hash__init_new_context(struct mm_struct *mm) ...@@ -89,10 +90,20 @@ static int hash__init_new_context(struct mm_struct *mm)
} else { } else {
/* This is fork. Copy hash_context details from current->mm */ /* This is fork. Copy hash_context details from current->mm */
memcpy(mm->context.hash_context, current->mm->context.hash_context, sizeof(struct hash_mm_context)); memcpy(mm->context.hash_context, current->mm->context.hash_context, sizeof(struct hash_mm_context));
#ifdef CONFIG_PPC_SUBPAGE_PROT
/* inherit subpage prot detalis if we have one. */
if (current->mm->context.hash_context->spt) {
mm->context.hash_context->spt = kmalloc(sizeof(struct subpage_prot_table),
GFP_KERNEL);
if (!mm->context.hash_context->spt) {
ida_free(&mmu_context_ida, index);
kfree(mm->context.hash_context);
return -ENOMEM;
} }
}
#endif
subpage_prot_init_new_context(mm); }
pkey_mm_init(mm); pkey_mm_init(mm);
return index; return index;
......
...@@ -29,6 +29,9 @@ void subpage_prot_free(struct mm_struct *mm) ...@@ -29,6 +29,9 @@ void subpage_prot_free(struct mm_struct *mm)
unsigned long i, j, addr; unsigned long i, j, addr;
u32 **p; u32 **p;
if (!spt)
return;
for (i = 0; i < 4; ++i) { for (i = 0; i < 4; ++i) {
if (spt->low_prot[i]) { if (spt->low_prot[i]) {
free_page((unsigned long)spt->low_prot[i]); free_page((unsigned long)spt->low_prot[i]);
...@@ -48,13 +51,7 @@ void subpage_prot_free(struct mm_struct *mm) ...@@ -48,13 +51,7 @@ void subpage_prot_free(struct mm_struct *mm)
free_page((unsigned long)p); free_page((unsigned long)p);
} }
spt->maxaddr = 0; spt->maxaddr = 0;
} kfree(spt);
void subpage_prot_init_new_context(struct mm_struct *mm)
{
struct subpage_prot_table *spt = mm_ctx_subpage_prot(&mm->context);
memset(spt, 0, sizeof(*spt));
} }
static void hpte_flush_range(struct mm_struct *mm, unsigned long addr, static void hpte_flush_range(struct mm_struct *mm, unsigned long addr,
...@@ -99,6 +96,9 @@ static void subpage_prot_clear(unsigned long addr, unsigned long len) ...@@ -99,6 +96,9 @@ static void subpage_prot_clear(unsigned long addr, unsigned long len)
size_t nw; size_t nw;
unsigned long next, limit; unsigned long next, limit;
if (!spt)
return ;
down_write(&mm->mmap_sem); down_write(&mm->mmap_sem);
limit = addr + len; limit = addr + len;
if (limit > spt->maxaddr) if (limit > spt->maxaddr)
...@@ -218,6 +218,20 @@ SYSCALL_DEFINE3(subpage_prot, unsigned long, addr, ...@@ -218,6 +218,20 @@ SYSCALL_DEFINE3(subpage_prot, unsigned long, addr,
return -EFAULT; return -EFAULT;
down_write(&mm->mmap_sem); down_write(&mm->mmap_sem);
if (!spt) {
/*
* Allocate subpage prot table if not already done.
* Do this with mmap_sem held
*/
spt = kzalloc(sizeof(struct subpage_prot_table), GFP_KERNEL);
if (!spt) {
err = -ENOMEM;
goto out;
}
mm->context.hash_context->spt = spt;
}
subpage_mark_vma_nohuge(mm, addr, len); subpage_mark_vma_nohuge(mm, addr, len);
for (limit = addr + len; addr < limit; addr = next) { for (limit = addr + len; addr < limit; addr = next) {
next = pmd_addr_end(addr, limit); next = pmd_addr_end(addr, limit);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment