Commit 4c86d2f5 authored by Alexander Gordeev, committed by Vasily Gorbik

s390/mm: fix phys vs virt confusion in vmem_*() functions family

For historical reasons, the vmem_*() family of functions misuses or
ignores the difference between physical and virtual addresses.
Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
Reviewed-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
parent 2a444fdc
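
For reference, a minimal sketch (not part of this commit) of the distinction the patch restores: memblock_phys_alloc() returns a physical address (phys_addr_t), while memblock_alloc(), free_pages() and virt_to_page() operate on kernel virtual addresses, with __pa()/__va() translating between the two.

/*
 * Illustrative sketch only -- not code from this commit.
 */
#include <linux/memblock.h>	/* memblock_alloc(), memblock_phys_alloc() */
#include <asm/page.h>		/* __va(), __pa() */

static void *boot_alloc_sketch(unsigned long size)
{
	/* memblock_phys_alloc() hands back a phys_addr_t ... */
	phys_addr_t pa = memblock_phys_alloc(size, size);

	if (!pa)
		return NULL;
	/* ... which must go through __va() before being used as a pointer. */
	return __va(pa);
}

memblock_alloc() already returns a virtual address (or NULL), which is why vmem_alloc_pages() and vmem_pte_alloc() in the diff below can use its return value directly, and why vmem_free_pages() now checks virt_to_page(addr) instead of phys_to_page(addr).
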
@@ -26,14 +26,14 @@ static void __ref *vmem_alloc_pages(unsigned int order)
 	if (slab_is_available())
 		return (void *)__get_free_pages(GFP_KERNEL, order);
-	return (void *) memblock_phys_alloc(size, size);
+	return memblock_alloc(size, size);
 }
 
 static void vmem_free_pages(unsigned long addr, int order)
 {
 	/* We don't expect boot memory to be removed ever. */
 	if (!slab_is_available() ||
-	    WARN_ON_ONCE(PageReserved(phys_to_page(addr))))
+	    WARN_ON_ONCE(PageReserved(virt_to_page(addr))))
 		return;
 	free_pages(addr, order);
 }
@@ -56,7 +56,7 @@ pte_t __ref *vmem_pte_alloc(void)
 	if (slab_is_available())
 		pte = (pte_t *) page_table_alloc(&init_mm);
 	else
-		pte = (pte_t *) memblock_phys_alloc(size, size);
+		pte = (pte_t *) memblock_alloc(size, size);
 	if (!pte)
 		return NULL;
 	memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
@@ -84,7 +84,7 @@ static void vmemmap_flush_unused_sub_pmd(void)
 {
 	if (!unused_sub_pmd_start)
 		return;
-	memset(__va(unused_sub_pmd_start), PAGE_UNUSED,
+	memset((void *)unused_sub_pmd_start, PAGE_UNUSED,
 	       ALIGN(unused_sub_pmd_start, PMD_SIZE) - unused_sub_pmd_start);
 	unused_sub_pmd_start = 0;
 }
@@ -97,7 +97,7 @@ static void vmemmap_mark_sub_pmd_used(unsigned long start, unsigned long end)
 	 * getting removed (just in case the memmap never gets initialized,
 	 * e.g., because the memory block never gets onlined).
 	 */
-	memset(__va(start), 0, sizeof(struct page));
+	memset((void *)start, 0, sizeof(struct page));
 }
 
 static void vmemmap_use_sub_pmd(unsigned long start, unsigned long end)
@@ -118,7 +118,7 @@ static void vmemmap_use_sub_pmd(unsigned long start, unsigned long end)
 static void vmemmap_use_new_sub_pmd(unsigned long start, unsigned long end)
 {
-	void *page = __va(ALIGN_DOWN(start, PMD_SIZE));
+	unsigned long page = ALIGN_DOWN(start, PMD_SIZE);
 
 	vmemmap_flush_unused_sub_pmd();
@@ -127,7 +127,7 @@ static void vmemmap_use_new_sub_pmd(unsigned long start, unsigned long end)
 	/* Mark the unused parts of the new memmap page PAGE_UNUSED. */
 	if (!IS_ALIGNED(start, PMD_SIZE))
-		memset(page, PAGE_UNUSED, start - __pa(page));
+		memset((void *)page, PAGE_UNUSED, start - page);
 
 	/*
 	 * We want to avoid memset(PAGE_UNUSED) when populating the vmemmap of
 	 * consecutive sections. Remember for the last added PMD the last
@@ -140,11 +140,11 @@ static void vmemmap_use_new_sub_pmd(unsigned long start, unsigned long end)
 /* Returns true if the PMD is completely unused and can be freed. */
 static bool vmemmap_unuse_sub_pmd(unsigned long start, unsigned long end)
 {
-	void *page = __va(ALIGN_DOWN(start, PMD_SIZE));
+	unsigned long page = ALIGN_DOWN(start, PMD_SIZE);
 
 	vmemmap_flush_unused_sub_pmd();
-	memset(__va(start), PAGE_UNUSED, end - start);
+	memset((void *)start, PAGE_UNUSED, end - start);
 
-	return !memchr_inv(page, PAGE_UNUSED, PMD_SIZE);
+	return !memchr_inv((void *)page, PAGE_UNUSED, PMD_SIZE);
 }
 
 /* __ref: we'll only call vmemmap_alloc_block() via vmemmap_populate() */
@@ -165,7 +165,7 @@ static int __ref modify_pte_table(pmd_t *pmd, unsigned long addr,
 			if (pte_none(*pte))
 				continue;
 			if (!direct)
-				vmem_free_pages(pfn_to_phys(pte_pfn(*pte)), 0);
+				vmem_free_pages((unsigned long) pfn_to_virt(pte_pfn(*pte)), 0);
 			pte_clear(&init_mm, addr, pte);
 		} else if (pte_none(*pte)) {
 			if (!direct) {
@@ -175,7 +175,7 @@ static int __ref modify_pte_table(pmd_t *pmd, unsigned long addr,
 					goto out;
 				pte_val(*pte) = __pa(new_page) | prot;
 			} else {
-				pte_val(*pte) = addr | prot;
+				pte_val(*pte) = __pa(addr) | prot;
 			}
 		} else {
 			continue;
@@ -200,7 +200,7 @@ static void try_free_pte_table(pmd_t *pmd, unsigned long start)
 		if (!pte_none(*pte))
 			return;
 	}
-	vmem_pte_free(__va(pmd_deref(*pmd)));
+	vmem_pte_free((unsigned long *) pmd_deref(*pmd));
 	pmd_clear(pmd);
 }
@@ -241,7 +241,7 @@ static int __ref modify_pmd_table(pud_t *pud, unsigned long addr,
 		    IS_ALIGNED(next, PMD_SIZE) &&
 		    MACHINE_HAS_EDAT1 && addr && direct &&
 		    !debug_pagealloc_enabled()) {
-			pmd_val(*pmd) = addr | prot;
+			pmd_val(*pmd) = __pa(addr) | prot;
 			pages++;
 			continue;
 		} else if (!direct && MACHINE_HAS_EDAT1) {
@@ -337,7 +337,7 @@ static int modify_pud_table(p4d_t *p4d, unsigned long addr, unsigned long end,
 		    IS_ALIGNED(next, PUD_SIZE) &&
 		    MACHINE_HAS_EDAT2 && addr && direct &&
 		    !debug_pagealloc_enabled()) {
-			pud_val(*pud) = addr | prot;
+			pud_val(*pud) = __pa(addr) | prot;
 			pages++;
 			continue;
 		}
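
The __pa(addr) conversions in modify_pte_table(), modify_pmd_table() and modify_pud_table() apply the same rule in the other direction: addr walks kernel virtual addresses, but a page-table entry has to hold the physical address of the mapped frame. The old addr | prot only worked because kernel virtual and physical addresses currently coincide on s390; the patch spells the translation out. A minimal sketch of the pattern, using a hypothetical helper name:

/* Hypothetical helper, for illustration only; mirrors the pattern above. */
static void set_direct_pmd_sketch(pmd_t *pmd, unsigned long vaddr,
				  unsigned long prot)
{
	/* Store the physical frame address, not the virtual one. */
	pmd_val(*pmd) = __pa(vaddr) | prot;
}
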