Commit 0cd2a787 authored by Christian Borntraeger

s390/gmap: make gmap memcg aware

gmap allocations can be attributed to a process.
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
Acked-by: Heiko Carstens <hca@linux.ibm.com>
Acked-by: Janosch Frank <frankja@linux.ibm.com>
Acked-by: Cornelia Huck <cohuck@redhat.com>
parent c4196218
...@@ -2,7 +2,7 @@ ...@@ -2,7 +2,7 @@
/* /*
* KVM guest address space mapping code * KVM guest address space mapping code
* *
* Copyright IBM Corp. 2007, 2016, 2018 * Copyright IBM Corp. 2007, 2020
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com> * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
* David Hildenbrand <david@redhat.com> * David Hildenbrand <david@redhat.com>
* Janosch Frank <frankja@linux.vnet.ibm.com> * Janosch Frank <frankja@linux.vnet.ibm.com>
...@@ -56,19 +56,19 @@ static struct gmap *gmap_alloc(unsigned long limit) ...@@ -56,19 +56,19 @@ static struct gmap *gmap_alloc(unsigned long limit)
atype = _ASCE_TYPE_REGION1; atype = _ASCE_TYPE_REGION1;
etype = _REGION1_ENTRY_EMPTY; etype = _REGION1_ENTRY_EMPTY;
} }
gmap = kzalloc(sizeof(struct gmap), GFP_KERNEL); gmap = kzalloc(sizeof(struct gmap), GFP_KERNEL_ACCOUNT);
if (!gmap) if (!gmap)
goto out; goto out;
INIT_LIST_HEAD(&gmap->crst_list); INIT_LIST_HEAD(&gmap->crst_list);
INIT_LIST_HEAD(&gmap->children); INIT_LIST_HEAD(&gmap->children);
INIT_LIST_HEAD(&gmap->pt_list); INIT_LIST_HEAD(&gmap->pt_list);
INIT_RADIX_TREE(&gmap->guest_to_host, GFP_KERNEL); INIT_RADIX_TREE(&gmap->guest_to_host, GFP_KERNEL_ACCOUNT);
INIT_RADIX_TREE(&gmap->host_to_guest, GFP_ATOMIC); INIT_RADIX_TREE(&gmap->host_to_guest, GFP_ATOMIC | __GFP_ACCOUNT);
INIT_RADIX_TREE(&gmap->host_to_rmap, GFP_ATOMIC); INIT_RADIX_TREE(&gmap->host_to_rmap, GFP_ATOMIC | __GFP_ACCOUNT);
spin_lock_init(&gmap->guest_table_lock); spin_lock_init(&gmap->guest_table_lock);
spin_lock_init(&gmap->shadow_lock); spin_lock_init(&gmap->shadow_lock);
refcount_set(&gmap->ref_count, 1); refcount_set(&gmap->ref_count, 1);
page = alloc_pages(GFP_KERNEL, CRST_ALLOC_ORDER); page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
if (!page) if (!page)
goto out_free; goto out_free;
page->index = 0; page->index = 0;
...@@ -309,7 +309,7 @@ static int gmap_alloc_table(struct gmap *gmap, unsigned long *table, ...@@ -309,7 +309,7 @@ static int gmap_alloc_table(struct gmap *gmap, unsigned long *table,
unsigned long *new; unsigned long *new;
/* since we dont free the gmap table until gmap_free we can unlock */ /* since we dont free the gmap table until gmap_free we can unlock */
page = alloc_pages(GFP_KERNEL, CRST_ALLOC_ORDER); page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
if (!page) if (!page)
return -ENOMEM; return -ENOMEM;
new = (unsigned long *) page_to_phys(page); new = (unsigned long *) page_to_phys(page);
...@@ -594,7 +594,7 @@ int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr) ...@@ -594,7 +594,7 @@ int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
if (pmd_large(*pmd) && !gmap->mm->context.allow_gmap_hpage_1m) if (pmd_large(*pmd) && !gmap->mm->context.allow_gmap_hpage_1m)
return -EFAULT; return -EFAULT;
/* Link gmap segment table entry location to page table. */ /* Link gmap segment table entry location to page table. */
rc = radix_tree_preload(GFP_KERNEL); rc = radix_tree_preload(GFP_KERNEL_ACCOUNT);
if (rc) if (rc)
return rc; return rc;
ptl = pmd_lock(mm, pmd); ptl = pmd_lock(mm, pmd);
...@@ -1218,11 +1218,11 @@ static int gmap_protect_rmap(struct gmap *sg, unsigned long raddr, ...@@ -1218,11 +1218,11 @@ static int gmap_protect_rmap(struct gmap *sg, unsigned long raddr,
vmaddr = __gmap_translate(parent, paddr); vmaddr = __gmap_translate(parent, paddr);
if (IS_ERR_VALUE(vmaddr)) if (IS_ERR_VALUE(vmaddr))
return vmaddr; return vmaddr;
rmap = kzalloc(sizeof(*rmap), GFP_KERNEL); rmap = kzalloc(sizeof(*rmap), GFP_KERNEL_ACCOUNT);
if (!rmap) if (!rmap)
return -ENOMEM; return -ENOMEM;
rmap->raddr = raddr; rmap->raddr = raddr;
rc = radix_tree_preload(GFP_KERNEL); rc = radix_tree_preload(GFP_KERNEL_ACCOUNT);
if (rc) { if (rc) {
kfree(rmap); kfree(rmap);
return rc; return rc;
...@@ -1741,7 +1741,7 @@ int gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, unsigned long r2t, ...@@ -1741,7 +1741,7 @@ int gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, unsigned long r2t,
BUG_ON(!gmap_is_shadow(sg)); BUG_ON(!gmap_is_shadow(sg));
/* Allocate a shadow region second table */ /* Allocate a shadow region second table */
page = alloc_pages(GFP_KERNEL, CRST_ALLOC_ORDER); page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
if (!page) if (!page)
return -ENOMEM; return -ENOMEM;
page->index = r2t & _REGION_ENTRY_ORIGIN; page->index = r2t & _REGION_ENTRY_ORIGIN;
...@@ -1825,7 +1825,7 @@ int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t, ...@@ -1825,7 +1825,7 @@ int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t,
BUG_ON(!gmap_is_shadow(sg)); BUG_ON(!gmap_is_shadow(sg));
/* Allocate a shadow region second table */ /* Allocate a shadow region second table */
page = alloc_pages(GFP_KERNEL, CRST_ALLOC_ORDER); page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
if (!page) if (!page)
return -ENOMEM; return -ENOMEM;
page->index = r3t & _REGION_ENTRY_ORIGIN; page->index = r3t & _REGION_ENTRY_ORIGIN;
...@@ -1909,7 +1909,7 @@ int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt, ...@@ -1909,7 +1909,7 @@ int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt,
BUG_ON(!gmap_is_shadow(sg) || (sgt & _REGION3_ENTRY_LARGE)); BUG_ON(!gmap_is_shadow(sg) || (sgt & _REGION3_ENTRY_LARGE));
/* Allocate a shadow segment table */ /* Allocate a shadow segment table */
page = alloc_pages(GFP_KERNEL, CRST_ALLOC_ORDER); page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
if (!page) if (!page)
return -ENOMEM; return -ENOMEM;
page->index = sgt & _REGION_ENTRY_ORIGIN; page->index = sgt & _REGION_ENTRY_ORIGIN;
...@@ -2116,7 +2116,7 @@ int gmap_shadow_page(struct gmap *sg, unsigned long saddr, pte_t pte) ...@@ -2116,7 +2116,7 @@ int gmap_shadow_page(struct gmap *sg, unsigned long saddr, pte_t pte)
parent = sg->parent; parent = sg->parent;
prot = (pte_val(pte) & _PAGE_PROTECT) ? PROT_READ : PROT_WRITE; prot = (pte_val(pte) & _PAGE_PROTECT) ? PROT_READ : PROT_WRITE;
rmap = kzalloc(sizeof(*rmap), GFP_KERNEL); rmap = kzalloc(sizeof(*rmap), GFP_KERNEL_ACCOUNT);
if (!rmap) if (!rmap)
return -ENOMEM; return -ENOMEM;
rmap->raddr = (saddr & PAGE_MASK) | _SHADOW_RMAP_PGTABLE; rmap->raddr = (saddr & PAGE_MASK) | _SHADOW_RMAP_PGTABLE;
...@@ -2128,7 +2128,7 @@ int gmap_shadow_page(struct gmap *sg, unsigned long saddr, pte_t pte) ...@@ -2128,7 +2128,7 @@ int gmap_shadow_page(struct gmap *sg, unsigned long saddr, pte_t pte)
rc = vmaddr; rc = vmaddr;
break; break;
} }
rc = radix_tree_preload(GFP_KERNEL); rc = radix_tree_preload(GFP_KERNEL_ACCOUNT);
if (rc) if (rc)
break; break;
rc = -EAGAIN; rc = -EAGAIN;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment