Commit 24cecc37 authored by Catalin Marinas, committed by Linus Torvalds

arm64: Revert support for execute-only user mappings

The ARMv8 64-bit architecture supports execute-only user permissions by
clearing the PTE_USER and PTE_UXN bits, practically making it a mostly
privileged mapping, but one from which userspace running at EL0 can still
execute.

The downside, however, is that the kernel at EL1 inadvertently reading
such a mapping would not trip over the PAN (privileged access never)
protection, since PAN only traps privileged accesses to pages marked
user-accessible (PTE_USER set) and execute-only mappings deliberately
clear that bit.

Revert the relevant bits from commit cab15ce6 ("arm64: Introduce
execute-only page access permissions") so that PROT_EXEC implies
PROT_READ (and therefore PTE_USER) until the architecture gains proper
support for execute-only user mappings.
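
A minimal userspace sketch (illustration only, not part of this patch) of
the user-visible effect: before the revert, the read below faulted with
SIGSEGV because an exec-only VMA carried no read permission; with
PROT_EXEC implying PROT_READ, it succeeds.

#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	/* Request an "execute-only" anonymous mapping. */
	unsigned char *p = mmap(NULL, 4096, PROT_EXEC,
				MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* With PROT_EXEC implying PROT_READ, this load no longer faults. */
	printf("first byte: %u\n", p[0]);

	munmap(p, 4096);
	return 0;
}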

Fixes: cab15ce6 ("arm64: Introduce execute-only page access permissions")
Cc: <stable@vger.kernel.org> # 4.9.x-
Acked-by: Will Deacon <will@kernel.org>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent c79f46a2
--- a/arch/arm64/include/asm/pgtable-prot.h
+++ b/arch/arm64/include/asm/pgtable-prot.h
@@ -85,13 +85,12 @@
 #define PAGE_SHARED_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_WRITE)
 #define PAGE_READONLY		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN)
 #define PAGE_READONLY_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN)
-#define PAGE_EXECONLY		__pgprot(_PAGE_DEFAULT | PTE_RDONLY | PTE_NG | PTE_PXN)
 
 #define __P000  PAGE_NONE
 #define __P001  PAGE_READONLY
 #define __P010  PAGE_READONLY
 #define __P011  PAGE_READONLY
-#define __P100  PAGE_EXECONLY
+#define __P100  PAGE_READONLY_EXEC
 #define __P101  PAGE_READONLY_EXEC
 #define __P110  PAGE_READONLY_EXEC
 #define __P111  PAGE_READONLY_EXEC
@@ -100,7 +99,7 @@
 #define __S001  PAGE_READONLY
 #define __S010  PAGE_SHARED
 #define __S011  PAGE_SHARED
-#define __S100  PAGE_EXECONLY
+#define __S100  PAGE_READONLY_EXEC
 #define __S101  PAGE_READONLY_EXEC
 #define __S110  PAGE_SHARED_EXEC
 #define __S111  PAGE_SHARED_EXEC
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -96,12 +96,8 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
 #define pte_dirty(pte)		(pte_sw_dirty(pte) || pte_hw_dirty(pte))
 
 #define pte_valid(pte)		(!!(pte_val(pte) & PTE_VALID))
-/*
- * Execute-only user mappings do not have the PTE_USER bit set. All valid
- * kernel mappings have the PTE_UXN bit set.
- */
 #define pte_valid_not_user(pte) \
-	((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == (PTE_VALID | PTE_UXN))
+	((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID)
 #define pte_valid_young(pte) \
 	((pte_val(pte) & (PTE_VALID | PTE_AF)) == (PTE_VALID | PTE_AF))
 #define pte_valid_user(pte) \
@@ -117,8 +113,8 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
 
 /*
  * p??_access_permitted() is true for valid user mappings (subject to the
- * write permission check) other than user execute-only which do not have the
- * PTE_USER bit set. PROT_NONE mappings do not have the PTE_VALID bit set.
+ * write permission check). PROT_NONE mappings do not have the PTE_VALID bit
+ * set.
  */
 #define pte_access_permitted(pte, write) \
 	(pte_valid_user(pte) && (!(write) || pte_write(pte)))
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -445,7 +445,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
 	const struct fault_info *inf;
 	struct mm_struct *mm = current->mm;
 	vm_fault_t fault, major = 0;
-	unsigned long vm_flags = VM_READ | VM_WRITE;
+	unsigned long vm_flags = VM_READ | VM_WRITE | VM_EXEC;
 	unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
 	if (kprobe_page_fault(regs, esr))
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -90,12 +90,6 @@ static void unmap_region(struct mm_struct *mm,
  * MAP_PRIVATE	r: (no) no	r: (yes) yes	r: (no) yes	r: (no) yes
  *		w: (no) no	w: (no) no	w: (copy) copy	w: (no) no
  *		x: (no) no	x: (no) yes	x: (no) yes	x: (yes) yes
- *
- * On arm64, PROT_EXEC has the following behaviour for both MAP_SHARED and
- * MAP_PRIVATE:
- *								r: (no) no
- *								w: (no) no
- *								x: (yes) yes
  */
 pgprot_t protection_map[16] __ro_after_init = {
 	__P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
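
As a side note on the hunk above: protection_map[16] is indexed by the low
vm_flags bits (this is what mm/mmap.c's vm_get_page_prot() does). The sketch
below is ordinary userspace C, not kernel code; the table of names is
transcribed from the post-revert __P/__S definitions, with __P000/__S000
assumed to be PAGE_NONE as in the unchanged part of the header. It shows how
a PROT_EXEC-only private mapping now resolves to PAGE_READONLY_EXEC instead
of PAGE_EXECONLY.

#include <stdio.h>

/* vm_flags bits as defined in include/linux/mm.h */
#define VM_READ   0x1UL
#define VM_WRITE  0x2UL
#define VM_EXEC   0x4UL
#define VM_SHARED 0x8UL

/* Names of the post-revert arm64 protection_map[] entries (__P000..__S111). */
static const char *const prot_names[16] = {
	"PAGE_NONE",          "PAGE_READONLY",      "PAGE_READONLY",      "PAGE_READONLY",
	"PAGE_READONLY_EXEC", "PAGE_READONLY_EXEC", "PAGE_READONLY_EXEC", "PAGE_READONLY_EXEC",
	"PAGE_NONE",          "PAGE_READONLY",      "PAGE_SHARED",        "PAGE_SHARED",
	"PAGE_READONLY_EXEC", "PAGE_READONLY_EXEC", "PAGE_SHARED_EXEC",   "PAGE_SHARED_EXEC",
};

int main(void)
{
	unsigned long vm_flags = VM_EXEC;	/* PROT_EXEC-only, MAP_PRIVATE */
	unsigned long idx = vm_flags & (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED);

	/* Prints "index 4 -> PAGE_READONLY_EXEC"; before the revert this
	 * slot held PAGE_EXECONLY. */
	printf("index %lu -> %s\n", idx, prot_names[idx]);
	return 0;
}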