Commit 1882e562 authored by Linus Torvalds

Merge tag 'devmem-v4.11-rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux

Pull CONFIG_STRICT_DEVMEM fix from Kees Cook:
 "Fixes /dev/mem to read back zeros for System RAM areas in the 1MB
  exception area on x86 to avoid exposing RAM or tripping hardened
  usercopy"

* tag 'devmem-v4.11-rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux:
  mm: Tighten x86 /dev/mem with zeroing reads
parents 78739333 a4866aa8
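The read-back behavior described in the pull message can be checked from user space with a small test program. The sketch below is illustrative only and is not part of the patch; it assumes a kernel built with CONFIG_STRICT_DEVMEM=y, root privileges, and that the chosen OFFSET falls in a low-1MB region listed as System RAM in /proc/iomem.

/* Hypothetical demo: read a low-1MB page of /dev/mem and check for zeros.
 * Assumes CONFIG_STRICT_DEVMEM=y, root, and that OFFSET is System RAM.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

#define OFFSET 0x1000UL	/* arbitrary page inside the first megabyte */

int main(void)
{
	unsigned char buf[4096];
	int fd = open("/dev/mem", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/mem");
		return 1;
	}
	if (pread(fd, buf, sizeof(buf), OFFSET) != (ssize_t)sizeof(buf)) {
		perror("pread");
		return 1;
	}

	/* After this fix, System RAM pages here read back as all zeros. */
	for (size_t i = 0; i < sizeof(buf); i++) {
		if (buf[i] != 0) {
			printf("non-zero byte at offset %zu\n", i);
			return 2;
		}
	}
	printf("page at 0x%lx reads as zeros\n", OFFSET);
	close(fd);
	return 0;
}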
@@ -643,21 +643,40 @@ void __init init_mem_mapping(void)
  * devmem_is_allowed() checks to see if /dev/mem access to a certain address
  * is valid. The argument is a physical page number.
  *
- *
- * On x86, access has to be given to the first megabyte of ram because that area
- * contains BIOS code and data regions used by X and dosemu and similar apps.
- * Access has to be given to non-kernel-ram areas as well, these contain the PCI
- * mmio resources as well as potential bios/acpi data regions.
+ * On x86, access has to be given to the first megabyte of RAM because that
+ * area traditionally contains BIOS code and data regions used by X, dosemu,
+ * and similar apps. Since they map the entire memory range, the whole range
+ * must be allowed (for mapping), but any areas that would otherwise be
+ * disallowed are flagged as being "zero filled" instead of rejected.
+ * Access has to be given to non-kernel-ram areas as well, these contain the
+ * PCI mmio resources as well as potential bios/acpi data regions.
  */
 int devmem_is_allowed(unsigned long pagenr)
 {
-	if (pagenr < 256)
-		return 1;
-	if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
+	if (page_is_ram(pagenr)) {
+		/*
+		 * For disallowed memory regions in the low 1MB range,
+		 * request that the page be shown as all zeros.
+		 */
+		if (pagenr < 256)
+			return 2;
+
+		return 0;
+	}
+
+	/*
+	 * This must follow RAM test, since System RAM is considered a
+	 * restricted resource under CONFIG_STRICT_IOMEM.
+	 */
+	if (iomem_is_exclusive(pagenr << PAGE_SHIFT)) {
+		/* Low 1MB bypasses iomem restrictions. */
+		if (pagenr < 256)
+			return 1;
+
 		return 0;
-	if (!page_is_ram(pagenr))
-		return 1;
-	return 0;
+	}
+
+	return 1;
 }
 
 void free_init_pages(char *what, unsigned long begin, unsigned long end)
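The rewritten comment above notes that X and dosemu style programs map the whole low range, so mapping must keep working even though reads of System RAM there are now zero filled. A minimal user-space sketch of that mapping case follows; it is an illustration under the stated assumptions (x86, root), not code from the patch.

/* Hypothetical demo: map the first 64 KiB of /dev/mem, as X/dosemu-style
 * programs do. Under CONFIG_STRICT_DEVMEM the low 1MB stays mappable;
 * only read()/write() of the System RAM pages there is zeroed/skipped.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/mem", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/mem");
		return 1;
	}

	/* Mapping the low megabyte is still permitted ("for mapping"). */
	void *p = mmap(NULL, 0x10000, PROT_READ, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	printf("mapped low memory at %p\n", p);

	munmap(p, 0x10000);
	close(fd);
	return 0;
}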
@@ -60,6 +60,10 @@ static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
 #endif
 
 #ifdef CONFIG_STRICT_DEVMEM
+static inline int page_is_allowed(unsigned long pfn)
+{
+	return devmem_is_allowed(pfn);
+}
 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
 {
 	u64 from = ((u64)pfn) << PAGE_SHIFT;
@@ -75,6 +79,10 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
 	return 1;
 }
 #else
+static inline int page_is_allowed(unsigned long pfn)
+{
+	return 1;
+}
 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
 {
 	return 1;
@@ -122,23 +130,31 @@ static ssize_t read_mem(struct file *file, char __user *buf,
 	while (count > 0) {
 		unsigned long remaining;
+		int allowed;
 
 		sz = size_inside_page(p, count);
 
-		if (!range_is_allowed(p >> PAGE_SHIFT, count))
+		allowed = page_is_allowed(p >> PAGE_SHIFT);
+		if (!allowed)
 			return -EPERM;
-
-		/*
-		 * On ia64 if a page has been mapped somewhere as uncached, then
-		 * it must also be accessed uncached by the kernel or data
-		 * corruption may occur.
-		 */
-		ptr = xlate_dev_mem_ptr(p);
-		if (!ptr)
-			return -EFAULT;
-
-		remaining = copy_to_user(buf, ptr, sz);
-		unxlate_dev_mem_ptr(p, ptr);
+		if (allowed == 2) {
+			/* Show zeros for restricted memory. */
+			remaining = clear_user(buf, sz);
+		} else {
+			/*
+			 * On ia64 if a page has been mapped somewhere as
+			 * uncached, then it must also be accessed uncached
+			 * by the kernel or data corruption may occur.
+			 */
+			ptr = xlate_dev_mem_ptr(p);
+			if (!ptr)
+				return -EFAULT;
+
+			remaining = copy_to_user(buf, ptr, sz);
+
+			unxlate_dev_mem_ptr(p, ptr);
+		}
+
 		if (remaining)
 			return -EFAULT;
 
 		buf += sz;
@@ -181,30 +197,36 @@ static ssize_t write_mem(struct file *file, const char __user *buf,
 #endif
 
 	while (count > 0) {
+		int allowed;
+
 		sz = size_inside_page(p, count);
 
-		if (!range_is_allowed(p >> PAGE_SHIFT, sz))
+		allowed = page_is_allowed(p >> PAGE_SHIFT);
+		if (!allowed)
 			return -EPERM;
 
-		/*
-		 * On ia64 if a page has been mapped somewhere as uncached, then
-		 * it must also be accessed uncached by the kernel or data
-		 * corruption may occur.
-		 */
-		ptr = xlate_dev_mem_ptr(p);
-		if (!ptr) {
-			if (written)
-				break;
-			return -EFAULT;
-		}
+		/* Skip actual writing when a page is marked as restricted. */
+		if (allowed == 1) {
+			/*
+			 * On ia64 if a page has been mapped somewhere as
+			 * uncached, then it must also be accessed uncached
+			 * by the kernel or data corruption may occur.
+			 */
+			ptr = xlate_dev_mem_ptr(p);
+			if (!ptr) {
+				if (written)
+					break;
+				return -EFAULT;
+			}
 
-		copied = copy_from_user(ptr, buf, sz);
-		unxlate_dev_mem_ptr(p, ptr);
-		if (copied) {
-			written += sz - copied;
-			if (written)
-				break;
-			return -EFAULT;
+			copied = copy_from_user(ptr, buf, sz);
+			unxlate_dev_mem_ptr(p, ptr);
+			if (copied) {
+				written += sz - copied;
+				if (written)
+					break;
+				return -EFAULT;
+			}
 		}
 
 		buf += sz;
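For completeness, the write path above skips the actual copy for restricted pages, so a write() appears to succeed while the page is left untouched and later reads return zeros. The following hypothetical demo exercises that round trip; it assumes CONFIG_STRICT_DEVMEM=y, root, and that OFFSET is System RAM according to /proc/iomem.

/* Hypothetical demo: a write() to a restricted low-1MB System RAM page
 * reports success but is silently dropped, and the read-back is zero.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

#define OFFSET 0x2000UL	/* assumed to be System RAM in the low 1MB */

int main(void)
{
	unsigned char in = 0xaa, out = 0xff;
	int fd = open("/dev/mem", O_RDWR);

	if (fd < 0) {
		perror("open /dev/mem");
		return 1;
	}

	/* The write reports success even though the page is not touched. */
	if (pwrite(fd, &in, 1, OFFSET) != 1)
		perror("pwrite");

	/* The read-back is satisfied with zeros for restricted RAM. */
	if (pread(fd, &out, 1, OFFSET) != 1)
		perror("pread");

	printf("wrote 0x%02x, read back 0x%02x\n", in, out);
	close(fd);
	return 0;
}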