Commit 4ffd96c9 authored by Linus Torvalds

Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 fixes from Will Deacon:
 "A mixture of compiler/static checker resolutions and a couple of MTE
  fixes:

   - Avoid erroneously marking untagged pages with PG_mte_tagged

   - Always reset KASAN tags for destination page in copy_page()

   - Mark PMU header functions 'static inline'

   - Fix some sparse warnings due to missing casts"

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: mte: Do not set PG_mte_tagged if tags were not initialized
  arm64: Also reset KASAN tag if page is not PG_mte_tagged
  arm64: perf: Mark all accessor functions inline
  ARM: perf: Mark all accessor functions inline
  arm64: vdso: Pass (void *) to virt_to_page()
  arm64/mm: mark private VM_FAULT_X defines as vm_fault_t
parents 46be92e5 c4c597f1
@@ -92,7 +92,7 @@
 #define RETURN_READ_PMEVCNTRN(n) \
 	return read_sysreg(PMEVCNTR##n)
-static unsigned long read_pmevcntrn(int n)
+static inline unsigned long read_pmevcntrn(int n)
 {
 	PMEVN_SWITCH(n, RETURN_READ_PMEVCNTRN);
 	return 0;
@@ -100,14 +100,14 @@ static unsigned long read_pmevcntrn(int n)
 #define WRITE_PMEVCNTRN(n) \
 	write_sysreg(val, PMEVCNTR##n)
-static void write_pmevcntrn(int n, unsigned long val)
+static inline void write_pmevcntrn(int n, unsigned long val)
 {
 	PMEVN_SWITCH(n, WRITE_PMEVCNTRN);
 }
 
 #define WRITE_PMEVTYPERN(n) \
 	write_sysreg(val, PMEVTYPER##n)
-static void write_pmevtypern(int n, unsigned long val)
+static inline void write_pmevtypern(int n, unsigned long val)
 {
 	PMEVN_SWITCH(n, WRITE_PMEVTYPERN);
 }
...
@@ -13,7 +13,7 @@
 #define RETURN_READ_PMEVCNTRN(n) \
 	return read_sysreg(pmevcntr##n##_el0)
-static unsigned long read_pmevcntrn(int n)
+static inline unsigned long read_pmevcntrn(int n)
 {
 	PMEVN_SWITCH(n, RETURN_READ_PMEVCNTRN);
 	return 0;
@@ -21,14 +21,14 @@ static unsigned long read_pmevcntrn(int n)
 #define WRITE_PMEVCNTRN(n) \
 	write_sysreg(val, pmevcntr##n##_el0)
-static void write_pmevcntrn(int n, unsigned long val)
+static inline void write_pmevcntrn(int n, unsigned long val)
 {
 	PMEVN_SWITCH(n, WRITE_PMEVCNTRN);
 }
 
 #define WRITE_PMEVTYPERN(n) \
 	write_sysreg(val, pmevtyper##n##_el0)
-static void write_pmevtypern(int n, unsigned long val)
+static inline void write_pmevtypern(int n, unsigned long val)
 {
 	PMEVN_SWITCH(n, WRITE_PMEVTYPERN);
 }
...
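The two hunks above make the same change to the ARM and arm64 PMU accessor headers. As a general illustration (the header below is a stand-in, not the kernel's): a function defined, not just declared, in a shared header wants 'static inline' rather than plain 'static', because a plain 'static' definition is duplicated into every includer and trips unused-function warnings in any translation unit that never calls it, while 'static inline' lets the compiler silently discard unused instances.

/* pmu_accessor.h -- illustrative header, not the kernel's */
#ifndef PMU_ACCESSOR_H
#define PMU_ACCESSOR_H

/*
 * Defined in a header, so it must be 'static inline': a plain
 * 'static' definition would be copied into every includer and
 * would warn (-Wunused-function) in any file that includes the
 * header but never calls it.
 */
static inline unsigned long read_counter(int n)
{
	return (unsigned long)n;	/* stand-in for read_sysreg() */
}

#endif /* PMU_ACCESSOR_H */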
@@ -66,13 +66,10 @@ void mte_sync_tags(pte_t old_pte, pte_t pte)
 		return;
 
 	/* if PG_mte_tagged is set, tags have already been initialised */
-	for (i = 0; i < nr_pages; i++, page++) {
-		if (!page_mte_tagged(page)) {
+	for (i = 0; i < nr_pages; i++, page++)
+		if (!page_mte_tagged(page))
 			mte_sync_page_tags(page, old_pte, check_swap,
 					   pte_is_tagged);
-			set_page_mte_tagged(page);
-		}
-	}
 
 	/* ensure the tags are visible before the PTE is set */
 	smp_wmb();
...
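The mte.c hunk above removes set_page_mte_tagged() from the loop: mte_sync_page_tags() can return without touching the tags (for instance when the PTE is not tagged and there are no swap tags to restore), so setting PG_mte_tagged unconditionally afterwards would mark such pages as tagged when they are not. A minimal sketch of the corrected ownership, using a toy struct and a hypothetical init_tags() in place of the kernel's restore/clear paths:

#include <stdbool.h>

struct page { bool mte_tagged; };	/* toy stand-in for the kernel's struct page */

static void init_tags(struct page *page) { (void)page; }	/* hypothetical initializer */

/* The "tagged" flag is set only by the path that actually
 * initializes the tags, never unconditionally by the caller. */
static void sync_page_tags(struct page *page, bool pte_is_tagged)
{
	if (!pte_is_tagged)
		return;			/* tags untouched, flag stays clear */

	init_tags(page);
	page->mte_tagged = true;	/* toy set_page_mte_tagged() */
}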
@@ -288,7 +288,7 @@ static int aarch32_alloc_kuser_vdso_page(void)
 	memcpy((void *)(vdso_page + 0x1000 - kuser_sz), __kuser_helper_start,
 	       kuser_sz);
 
-	aarch32_vectors_page = virt_to_page(vdso_page);
+	aarch32_vectors_page = virt_to_page((void *)vdso_page);
 	return 0;
 }
...
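The vdso.c hunk fixes a sparse warning: vdso_page holds a kernel virtual address as an unsigned long, while virt_to_page() takes a pointer, so the value is cast back to (void *) at the call site. A self-contained userspace analogue of the same pattern, with describe_page() as a hypothetical stand-in for virt_to_page():

#include <stdio.h>

/* Hypothetical stand-in for virt_to_page(): pointer-typed parameter. */
static void describe_page(const void *addr)
{
	printf("address %p\n", addr);
}

int main(void)
{
	char buf[64];
	unsigned long vaddr = (unsigned long)buf;	/* address carried as an integer */

	/* Handing the integer straight to a pointer-typed interface is
	 * what sparse complains about; the explicit cast makes the
	 * conversion intentional and silences the warning. */
	describe_page((void *)vaddr);
	return 0;
}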
@@ -21,9 +21,10 @@ void copy_highpage(struct page *to, struct page *from)
 	copy_page(kto, kfrom);
 
+	if (kasan_hw_tags_enabled())
+		page_kasan_tag_reset(to);
+
 	if (system_supports_mte() && page_mte_tagged(from)) {
-		if (kasan_hw_tags_enabled())
-			page_kasan_tag_reset(to);
-
 		/* It's a new page, shouldn't have been tagged yet */
 		WARN_ON_ONCE(!try_page_mte_tagging(to));
 		mte_copy_page_tags(kto, kfrom);
...
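The copy_highpage() hunk hoists the KASAN tag reset out of the MTE branch: with hardware tag-based KASAN, the destination page's tag needs resetting even when the source page carries no MTE tags, otherwise a stale tag left on the destination can trip a tag-check fault on a later access. Reduced to its control flow (identifiers as in the hunk above; a sketch, not the full kernel source):

if (kasan_hw_tags_enabled())
	page_kasan_tag_reset(to);	/* unconditional: also for untagged sources */

if (system_supports_mte() && page_mte_tagged(from))
	mte_copy_page_tags(kto, kfrom);	/* tag copy stays conditional */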
@@ -480,8 +480,8 @@ static void do_bad_area(unsigned long far, unsigned long esr,
 	}
 }
 
-#define VM_FAULT_BADMAP		0x010000
-#define VM_FAULT_BADACCESS	0x020000
+#define VM_FAULT_BADMAP		((__force vm_fault_t)0x010000)
+#define VM_FAULT_BADACCESS	((__force vm_fault_t)0x020000)
 
 static vm_fault_t __do_page_fault(struct mm_struct *mm, unsigned long addr,
 				  unsigned int mm_flags, unsigned long vm_flags,
...
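The fault.c hunk quiets sparse: vm_fault_t is declared __bitwise, so a bare integer constant is a type violation under sparse and needs a __force cast. A self-contained illustration of the annotation pattern; as in the kernel's headers, both annotations expand to nothing unless the code is run through sparse (__CHECKER__):

/* Sparse-only annotations: no-ops for the regular compiler. */
#ifdef __CHECKER__
#define __bitwise	__attribute__((bitwise))
#define __force		__attribute__((force))
#else
#define __bitwise
#define __force
#endif

typedef unsigned int __bitwise fault_t;	/* toy analogue of vm_fault_t */

/* Without __force, sparse rejects the bare constant as a conversion
 * to a restricted (bitwise) type; with it, the cast is accepted. */
#define FAULT_BADMAP	((__force fault_t)0x010000)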