Commit 885f7f8e authored by Christoph Hellwig, committed by Linus Torvalds

mm: rename flush_icache_user_range to flush_icache_user_page

The function currently known as flush_icache_user_range only operates on
a single page.  Rename it to flush_icache_user_page as we'll need the
name flush_icache_user_range for something else soon.
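
For reference, the sketch below models the shape of the renamed hook and the
copy-then-flush pattern its callers use (compare the asm-generic fallback and
copy_to_user_page() hunks further down in this diff). It is a stand-alone
user-space illustration, not kernel code: the struct stubs, the VM_EXEC value,
the printf body and the main() driver are placeholders, and only the names and
argument order match the interface being renamed here.

  /* Illustrative user-space model of the renamed hook; not kernel code.
   * The struct/flag stubs below stand in for the real mm definitions.
   */
  #include <stdio.h>
  #include <string.h>

  #define VM_EXEC 0x4			/* stand-in for the real vm_flags bit */

  struct mm_struct { int dummy; };
  struct vm_area_struct {
  	unsigned long vm_flags;
  	struct mm_struct *vm_mm;
  };
  struct page { char data[4096]; };	/* fake backing store for one "page" */

  /*
   * After this commit the single-page hook is named flush_icache_user_page();
   * the generic fallback is effectively a no-op and architectures that need
   * real i-cache maintenance override it.  This stub only prints what a real
   * implementation would flush.
   */
  static inline void flush_icache_user_page(struct vm_area_struct *vma,
  					  struct page *page,
  					  unsigned long addr, int len)
  {
  	(void)page;
  	if (vma->vm_flags & VM_EXEC)
  		printf("would flush %d bytes of i-cache at %#lx\n", len, addr);
  }

  /* Mirrors the generic copy_to_user_page() pattern: copy, then flush. */
  #define copy_to_user_page(vma, page, vaddr, dst, src, len)	\
  	do {							\
  		memcpy(dst, src, len);				\
  		flush_icache_user_page(vma, page, vaddr, len);	\
  	} while (0)

  int main(void)
  {
  	struct mm_struct mm = { 0 };
  	struct vm_area_struct vma = { .vm_flags = VM_EXEC, .vm_mm = &mm };
  	struct page pg;
  	char insn[4] = "NOP";	/* fake instruction bytes being patched in */

  	copy_to_user_page(&vma, &pg, 0x1000UL, pg.data, insn, sizeof(insn));
  	return 0;
  }
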
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Richard Henderson <rth@twiddle.net>
Cc: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Greentime Hu <green.hu@gmail.com>
Cc: Vincent Chen <deanbo422@gmail.com>
Cc: Jonas Bonn <jonas@southpole.se>
Cc: Stefan Kristiansson <stefan.kristiansson@saunalahti.fi>
Cc: Stafford Horne <shorne@gmail.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Paul Walmsley <paul.walmsley@sifive.com>
Cc: Palmer Dabbelt <palmer@sifive.com>
Cc: Albert Ou <aou@eecs.berkeley.edu>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Link: http://lkml.kernel.org/r/20200515143646.3857579-20-hch@lst.de
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 97f52c15
@@ -35,7 +35,7 @@ extern void smp_imb(void);
 extern void __load_new_mm_context(struct mm_struct *);
 static inline void
-flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
+flush_icache_user_page(struct vm_area_struct *vma, struct page *page,
 			unsigned long addr, int len)
 {
 	if (vma->vm_flags & VM_EXEC) {
@@ -46,16 +46,16 @@ flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
 			mm->context[smp_processor_id()] = 0;
 	}
 }
-#define flush_icache_user_range flush_icache_user_range
+#define flush_icache_user_page flush_icache_user_page
 #else /* CONFIG_SMP */
-extern void flush_icache_user_range(struct vm_area_struct *vma,
+extern void flush_icache_user_page(struct vm_area_struct *vma,
 		struct page *page, unsigned long addr, int len);
-#define flush_icache_user_range flush_icache_user_range
+#define flush_icache_user_page flush_icache_user_page
 #endif /* CONFIG_SMP */
 /* This is used only in __do_fault and do_swap_page. */
 #define flush_icache_page(vma, page) \
-	flush_icache_user_range((vma), (page), 0, 0)
+	flush_icache_user_page((vma), (page), 0, 0)
 #include <asm-generic/cacheflush.h>
...
@@ -740,7 +740,7 @@ ipi_flush_icache_page(void *x)
 }
 void
-flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
+flush_icache_user_page(struct vm_area_struct *vma, struct page *page,
 			unsigned long addr, int len)
 {
 	struct mm_struct *mm = vma->vm_mm;
...
@@ -22,7 +22,7 @@ extern void flush_icache_range(unsigned long start, unsigned long end);
 #define flush_icache_range flush_icache_range
 extern void clflush_cache_range(void *addr, int size);
-#define flush_icache_user_range(vma, page, user_addr, len) \
+#define flush_icache_user_page(vma, page, user_addr, len) \
 do { \
 	unsigned long _addr = (unsigned long) page_address(page) + ((user_addr) & ~PAGE_MASK); \
 	flush_icache_range(_addr, _addr + (len)); \
...
@@ -254,7 +254,7 @@ static inline void __flush_page_to_ram(void *vaddr)
 #define flush_dcache_mmap_unlock(mapping)	do { } while (0)
 #define flush_icache_page(vma, page)	__flush_page_to_ram(page_address(page))
-extern void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
+extern void flush_icache_user_page(struct vm_area_struct *vma, struct page *page,
 				    unsigned long addr, int len);
 extern void flush_icache_range(unsigned long address, unsigned long endaddr);
@@ -264,7 +264,7 @@ static inline void copy_to_user_page(struct vm_area_struct *vma,
 {
 	flush_cache_page(vma, vaddr, page_to_pfn(page));
 	memcpy(dst, src, len);
-	flush_icache_user_range(vma, page, vaddr, len);
+	flush_icache_user_page(vma, page, vaddr, len);
 }
 static inline void copy_from_user_page(struct vm_area_struct *vma,
 					struct page *page, unsigned long vaddr,
...
@@ -106,7 +106,7 @@ void flush_icache_range(unsigned long address, unsigned long endaddr)
 }
 EXPORT_SYMBOL(flush_icache_range);
-void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
+void flush_icache_user_page(struct vm_area_struct *vma, struct page *page,
 			     unsigned long addr, int len)
 {
 	if (CPU_IS_COLDFIRE) {
...
@@ -44,9 +44,9 @@ void invalidate_kernel_vmap_range(void *addr, int size);
 #define flush_dcache_mmap_unlock(mapping) xa_unlock_irq(&(mapping)->i_pages)
 #else
-void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
+void flush_icache_user_page(struct vm_area_struct *vma, struct page *page,
 			     unsigned long addr, int len);
-#define flush_icache_user_range flush_icache_user_range
+#define flush_icache_user_page flush_icache_user_page
 #include <asm-generic/cacheflush.h>
 #endif
...
@@ -36,7 +36,7 @@ void flush_icache_page(struct vm_area_struct *vma, struct page *page)
 	local_irq_restore(flags);
 }
-void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
+void flush_icache_user_page(struct vm_area_struct *vma, struct page *page,
 			     unsigned long addr, int len)
 {
 	unsigned long kaddr;
...
@@ -62,7 +62,7 @@ static inline void flush_dcache_page(struct page *page)
 	clear_bit(PG_dc_clean, &page->flags);
 }
-#define flush_icache_user_range(vma, page, addr, len)	\
+#define flush_icache_user_page(vma, page, addr, len)	\
 do {							\
 	if (vma->vm_flags & VM_EXEC)			\
 		sync_icache_dcache(page);		\
...
@@ -28,9 +28,9 @@ extern void flush_dcache_page(struct page *page);
 void flush_icache_range(unsigned long start, unsigned long stop);
 #define flush_icache_range flush_icache_range
-void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
+void flush_icache_user_page(struct vm_area_struct *vma, struct page *page,
 		unsigned long addr, int len);
-#define flush_icache_user_range flush_icache_user_range
+#define flush_icache_user_page flush_icache_user_page
 void flush_dcache_icache_page(struct page *page);
 void __flush_dcache_icache(void *page);
...
@@ -577,7 +577,7 @@ void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
 	flush_dcache_page(pg);
 }
-void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
+void flush_icache_user_page(struct vm_area_struct *vma, struct page *page,
 			     unsigned long addr, int len)
 {
 	unsigned long maddr;
...
@@ -27,7 +27,8 @@ static inline void flush_dcache_page(struct page *page)
  * so instead we just flush the whole thing.
  */
 #define flush_icache_range(start, end) flush_icache_all()
-#define flush_icache_user_range(vma, pg, addr, len) flush_icache_mm(vma->vm_mm, 0)
+#define flush_icache_user_page(vma, pg, addr, len) \
+	flush_icache_mm(vma->vm_mm, 0)
 #ifndef CONFIG_SMP
...
@@ -73,8 +73,8 @@ static inline void flush_icache_page(struct vm_area_struct *vma,
 }
 #endif
-#ifndef flush_icache_user_range
-static inline void flush_icache_user_range(struct vm_area_struct *vma,
+#ifndef flush_icache_user_page
+static inline void flush_icache_user_page(struct vm_area_struct *vma,
 					   struct page *page,
 					   unsigned long addr, int len)
 {
@@ -97,7 +97,7 @@ static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
 	do { \
 		memcpy(dst, src, len); \
-		flush_icache_user_range(vma, page, vaddr, len); \
+		flush_icache_user_page(vma, page, vaddr, len); \
 	} while (0)
 #endif
...
@@ -1668,7 +1668,7 @@ void __weak arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
 	copy_to_page(page, vaddr, src, len);
 	/*
-	 * We probably need flush_icache_user_range() but it needs vma.
+	 * We probably need flush_icache_user_page() but it needs vma.
 	 * This should work on most of architectures by default. If
 	 * architecture needs to do something different it can define
 	 * its own version of the function.
...