Commit b2077ebc authored by Linus Torvalds

Merge branch 'fixes' of git://ftp.arm.linux.org.uk/~rmk/linux-arm

Pull ARM fixes from Russell King:
 "This resolves some further issues with the dma mask changes on ARM
  which have been found by TI and others, and also some corner cases
  with the updates to the virtual to physical address translations.

  Konstantin also found some problems with the unwinder, which now
  performs tighter verification that the stack is valid while unwinding"

* 'fixes' of git://ftp.arm.linux.org.uk/~rmk/linux-arm:
  ARM: fix asm/memory.h build error
  ARM: 7917/1: cacheflush: correctly limit range of memory region being flushed
  ARM: 7913/1: fix framepointer check in unwind_frame
  ARM: 7912/1: check stack pointer in get_wchan
  ARM: 7909/1: mm: Call setup_dma_zone() post early_paging_init()
  ARM: 7908/1: mm: Fix the arm_dma_limit calculation
  ARM: another fix for the DMA mapping checks
parents 2430cdd0 b713aa0b
@@ -100,23 +100,19 @@
 #define TASK_UNMAPPED_BASE	UL(0x00000000)
 #endif
 
-#ifndef PHYS_OFFSET
-#define PHYS_OFFSET		UL(CONFIG_DRAM_BASE)
-#endif
-
 #ifndef END_MEM
 #define END_MEM		(UL(CONFIG_DRAM_BASE) + CONFIG_DRAM_SIZE)
 #endif
 
 #ifndef PAGE_OFFSET
-#define PAGE_OFFSET		(PHYS_OFFSET)
+#define PAGE_OFFSET		PLAT_PHYS_OFFSET
 #endif
 
 /*
  * The module can be at any place in ram in nommu mode.
  */
 #define MODULES_END		(END_MEM)
-#define MODULES_VADDR		(PHYS_OFFSET)
+#define MODULES_VADDR		PAGE_OFFSET
 
 #define XIP_VIRT_ADDR(physaddr)	(physaddr)
@@ -157,6 +153,16 @@
 #endif
 #define ARCH_PGD_MASK		((1 << ARCH_PGD_SHIFT) - 1)
 
+/*
+ * PLAT_PHYS_OFFSET is the offset (from zero) of the start of physical
+ * memory. This is used for XIP and NoMMU kernels, or by kernels which
+ * have their own mach/memory.h. Assembly code must always use
+ * PLAT_PHYS_OFFSET and not PHYS_OFFSET.
+ */
+#ifndef PLAT_PHYS_OFFSET
+#define PLAT_PHYS_OFFSET	UL(CONFIG_PHYS_OFFSET)
+#endif
+
 #ifndef __ASSEMBLY__
 
 /*
@@ -239,6 +245,8 @@ static inline unsigned long __phys_to_virt(phys_addr_t x)
 
 #else
 
+#define PHYS_OFFSET	PLAT_PHYS_OFFSET
+
 static inline phys_addr_t __virt_to_phys(unsigned long x)
 {
 	return (phys_addr_t)x - PAGE_OFFSET + PHYS_OFFSET;
@@ -251,17 +259,6 @@ static inline unsigned long __phys_to_virt(phys_addr_t x)
 #endif
 #endif
 
-#endif /* __ASSEMBLY__ */
-
-#ifndef PHYS_OFFSET
-#ifdef PLAT_PHYS_OFFSET
-#define PHYS_OFFSET	PLAT_PHYS_OFFSET
-#else
-#define PHYS_OFFSET	UL(CONFIG_PHYS_OFFSET)
-#endif
-#endif
-
-#ifndef __ASSEMBLY__
-
 /*
  * PFNs are used to describe any physical page; this means
......
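The net effect of the memory.h hunks above is a single fallback chain: PLAT_PHYS_OFFSET defaults to UL(CONFIG_PHYS_OFFSET), the nommu PAGE_OFFSET and MODULES_VADDR are expressed in terms of it, and C code on non-patched kernels gets PHYS_OFFSET as a plain alias, while assembly must always spell out PLAT_PHYS_OFFSET. A minimal compilable sketch of that layering (the CONFIG value and the UL() helper are illustrative stand-ins):

#include <stdio.h>

/* assumed Kconfig value, for illustration only */
#define CONFIG_PHYS_OFFSET 0x80000000

/* two-level paste so the argument expands first, as the kernel's _AC() does */
#define __UL(x) x##UL
#define UL(x) __UL(x)

/* fallback: platforms without their own mach/memory.h get the Kconfig value */
#ifndef PLAT_PHYS_OFFSET
#define PLAT_PHYS_OFFSET UL(CONFIG_PHYS_OFFSET)
#endif

/* C-only alias; assembly references PLAT_PHYS_OFFSET directly */
#ifndef PHYS_OFFSET
#define PHYS_OFFSET PLAT_PHYS_OFFSET
#endif

int main(void)
{
	printf("PHYS_OFFSET = %#lx\n", (unsigned long)PHYS_OFFSET);
	return 0;
}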
@@ -68,7 +68,7 @@ ENTRY(stext)
 
 #ifdef CONFIG_ARM_MPU
 	/* Calculate the size of a region covering just the kernel */
-	ldr	r5, =PHYS_OFFSET		@ Region start: PHYS_OFFSET
+	ldr	r5, =PLAT_PHYS_OFFSET		@ Region start: PHYS_OFFSET
 	ldr	r6, =(_end)			@ Cover whole kernel
 	sub	r6, r6, r5			@ Minimum size of region to map
 	clz	r6, r6				@ Region size must be 2^N...
@@ -213,7 +213,7 @@ ENTRY(__setup_mpu)
 	set_region_nr r0, #MPU_RAM_REGION
 	isb
 	/* Full access from PL0, PL1, shared for CONFIG_SMP, cacheable */
-	ldr	r0, =PHYS_OFFSET		@ RAM starts at PHYS_OFFSET
+	ldr	r0, =PLAT_PHYS_OFFSET		@ RAM starts at PHYS_OFFSET
 	ldr	r5,=(MPU_AP_PL1RW_PL0RW | MPU_RGN_NORMAL)
 	setup_region r0, r5, r6, MPU_DATA_SIDE	@ PHYS_OFFSET, shared, enabled
......
@@ -110,7 +110,7 @@ ENTRY(stext)
 	sub	r4, r3, r4			@ (PHYS_OFFSET - PAGE_OFFSET)
 	add	r8, r8, r4			@ PHYS_OFFSET
 #else
-	ldr	r8, =PHYS_OFFSET		@ always constant in this case
+	ldr	r8, =PLAT_PHYS_OFFSET		@ always constant in this case
 #endif
 
 	/*
......
@@ -404,6 +404,7 @@ EXPORT_SYMBOL(dump_fpu);
 unsigned long get_wchan(struct task_struct *p)
 {
 	struct stackframe frame;
+	unsigned long stack_page;
 	int count = 0;
 	if (!p || p == current || p->state == TASK_RUNNING)
 		return 0;
@@ -412,9 +413,11 @@ unsigned long get_wchan(struct task_struct *p)
 	frame.sp = thread_saved_sp(p);
 	frame.lr = 0;			/* recovered from the stack */
 	frame.pc = thread_saved_pc(p);
+	stack_page = (unsigned long)task_stack_page(p);
 	do {
-		int ret = unwind_frame(&frame);
-		if (ret < 0)
+		if (frame.sp < stack_page ||
+		    frame.sp >= stack_page + THREAD_SIZE ||
+		    unwind_frame(&frame) < 0)
 			return 0;
 		if (!in_sched_functions(frame.pc))
 			return frame.pc;
......
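The new guard in get_wchan() matters because the function runs without stopping the target task: the task can wake up and rewrite its own stack at any moment, so the saved sp may already be stale or garbage when it is read. A rough userspace model of the bounds test, with an assumed THREAD_SIZE (the real value is configuration-dependent):

#include <stdbool.h>
#include <stdio.h>

#define THREAD_SIZE 8192UL	/* assumed; varies with kernel configuration */

/* sp is only trusted if it points into [stack_page, stack_page + THREAD_SIZE) */
static bool sp_within_stack(unsigned long sp, unsigned long stack_page)
{
	return sp >= stack_page && sp < stack_page + THREAD_SIZE;
}

int main(void)
{
	unsigned long stack_page = 0xc0008000UL;

	printf("%d\n", sp_within_stack(stack_page + 64, stack_page));	/* 1: plausible sp */
	printf("%d\n", sp_within_stack(0xdeadbeefUL, stack_page));	/* 0: rejected */
	return 0;
}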
@@ -873,8 +873,6 @@ void __init setup_arch(char **cmdline_p)
 	machine_desc = mdesc;
 	machine_name = mdesc->name;
 
-	setup_dma_zone(mdesc);
-
 	if (mdesc->reboot_mode != REBOOT_HARD)
 		reboot_mode = mdesc->reboot_mode;
 
@@ -892,6 +890,7 @@ void __init setup_arch(char **cmdline_p)
 	sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
 
 	early_paging_init(mdesc, lookup_processor_type(read_cpuid_id()));
+	setup_dma_zone(mdesc);
 	sanity_check_meminfo();
 	arm_memblock_init(&meminfo, mdesc);
 
......
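Moving setup_dma_zone() after early_paging_init() matters because the latter can switch the kernel to a different physical offset (the TI case the merge message refers to), and the DMA limit is derived from that offset. A toy model of the ordering hazard; every name and value below is an illustrative stand-in, not the kernel's:

#include <stdio.h>

static unsigned long phys_offset = 0x00000000UL;	/* assumed boot-time view */
static unsigned long dma_limit;

/* may relocate the kernel's view of physical memory */
static void early_paging_init(void)
{
	phys_offset = 0x80000000UL;
}

/* derives the DMA limit from phys_offset: stale if run before the switch */
static void setup_dma_zone(unsigned long zone_size)
{
	dma_limit = phys_offset + zone_size - 1;
}

int main(void)
{
	early_paging_init();		/* must run first, as in the reordered setup_arch() */
	setup_dma_zone(0x10000000UL);
	printf("dma_limit = %#lx\n", dma_limit);	/* 0x8fffffff, not 0xfffffff */
	return 0;
}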
@@ -31,7 +31,7 @@ int notrace unwind_frame(struct stackframe *frame)
 	high = ALIGN(low, THREAD_SIZE);
 
 	/* check current frame pointer is within bounds */
-	if (fp < (low + 12) || fp + 4 >= high)
+	if (fp < low + 12 || fp > high - 4)
 		return -EINVAL;
 
 	/* restore the registers from the stack frame */
......
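The rewritten bounds test changes behaviour in two small ways: a frame whose saved registers end exactly at the top of the stack (fp == high - 4) is now accepted, and keeping the arithmetic off fp means the comparison cannot wrap when fp holds a huge bogus value. A short standalone demonstration (low/high are arbitrary sample bounds):

#include <limits.h>
#include <stdio.h>

static int old_check(unsigned long fp, unsigned long low, unsigned long high)
{
	return fp < (low + 12) || fp + 4 >= high;	/* 1 = reject frame */
}

static int new_check(unsigned long fp, unsigned long low, unsigned long high)
{
	return fp < low + 12 || fp > high - 4;		/* 1 = reject frame */
}

int main(void)
{
	unsigned long low = 0x2000, high = 0x4000;

	/* frame at the very top of the stack: old rejects it, new accepts it */
	printf("top frame: old=%d new=%d\n",
	       old_check(high - 4, low, high), new_check(high - 4, low, high));

	/* corrupt fp near ULONG_MAX: fp + 4 wraps, so the old form misses it */
	printf("bogus fp:  old=%d new=%d\n",
	       old_check(ULONG_MAX - 2, low, high),
	       new_check(ULONG_MAX - 2, low, high));
	return 0;
}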
@@ -509,9 +509,10 @@ static inline int
 __do_cache_op(unsigned long start, unsigned long end)
 {
 	int ret;
-	unsigned long chunk = PAGE_SIZE;
 
 	do {
+		unsigned long chunk = min(PAGE_SIZE, end - start);
+
 		if (signal_pending(current)) {
 			struct thread_info *ti = current_thread_info();
......
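Moving chunk inside the loop and clamping it with min() is what limits the flush to the requested region: a fixed PAGE_SIZE step could run past end whenever the region was not page-sized, operating on addresses the caller never passed in. A standalone model of the corrected loop (flush() is a stand-in for the real cache-maintenance call):

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define MIN(a, b) ((a) < (b) ? (a) : (b))

/* stand-in for the per-chunk cache maintenance call */
static void flush(unsigned long start, unsigned long end)
{
	printf("flush %#lx..%#lx\n", start, end);
}

static void do_cache_op(unsigned long start, unsigned long end)
{
	do {
		/* clamp: never step past the caller's end address */
		unsigned long chunk = MIN(PAGE_SIZE, end - start);

		flush(start, start + chunk);
		start += chunk;
	} while (start < end);
}

int main(void)
{
	/* a 100-byte region gets one 100-byte flush, not a full page */
	do_cache_op(0x10000, 0x10000 + 100);
	return 0;
}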
@@ -158,13 +158,49 @@ struct dma_map_ops arm_coherent_dma_ops = {
 };
 EXPORT_SYMBOL(arm_coherent_dma_ops);
 
+static int __dma_supported(struct device *dev, u64 mask, bool warn)
+{
+	unsigned long max_dma_pfn;
+
+	/*
+	 * If the mask allows for more memory than we can address,
+	 * and we actually have that much memory, then we must
+	 * indicate that DMA to this device is not supported.
+	 */
+	if (sizeof(mask) != sizeof(dma_addr_t) &&
+	    mask > (dma_addr_t)~0 &&
+	    dma_to_pfn(dev, ~0) < max_pfn) {
+		if (warn) {
+			dev_warn(dev, "Coherent DMA mask %#llx is larger than dma_addr_t allows\n",
+				 mask);
+			dev_warn(dev, "Driver did not use or check the return value from dma_set_coherent_mask()?\n");
+		}
+		return 0;
+	}
+
+	max_dma_pfn = min(max_pfn, arm_dma_pfn_limit);
+
+	/*
+	 * Translate the device's DMA mask to a PFN limit.  This
+	 * PFN number includes the page which we can DMA to.
+	 */
+	if (dma_to_pfn(dev, mask) < max_dma_pfn) {
+		if (warn)
+			dev_warn(dev, "Coherent DMA mask %#llx (pfn %#lx-%#lx) covers a smaller range of system memory than the DMA zone pfn 0x0-%#lx\n",
+				 mask,
+				 dma_to_pfn(dev, 0), dma_to_pfn(dev, mask) + 1,
+				 max_dma_pfn + 1);
+		return 0;
+	}
+
+	return 1;
+}
+
 static u64 get_coherent_dma_mask(struct device *dev)
 {
 	u64 mask = (u64)DMA_BIT_MASK(32);
 
 	if (dev) {
-		unsigned long max_dma_pfn;
-
 		mask = dev->coherent_dma_mask;
 
 		/*
@@ -176,34 +212,8 @@ static u64 get_coherent_dma_mask(struct device *dev)
 			return 0;
 		}
 
-		max_dma_pfn = min(max_pfn, arm_dma_pfn_limit);
-
-		/*
-		 * If the mask allows for more memory than we can address,
-		 * and we actually have that much memory, then fail the
-		 * allocation.
-		 */
-		if (sizeof(mask) != sizeof(dma_addr_t) &&
-		    mask > (dma_addr_t)~0 &&
-		    dma_to_pfn(dev, ~0) > max_dma_pfn) {
-			dev_warn(dev, "Coherent DMA mask %#llx is larger than dma_addr_t allows\n",
-				 mask);
-			dev_warn(dev, "Driver did not use or check the return value from dma_set_coherent_mask()?\n");
-			return 0;
-		}
-
-		/*
-		 * Now check that the mask, when translated to a PFN,
-		 * fits within the allowable addresses which we can
-		 * allocate.
-		 */
-		if (dma_to_pfn(dev, mask) < max_dma_pfn) {
-			dev_warn(dev, "Coherent DMA mask %#llx (pfn %#lx-%#lx) covers a smaller range of system memory than the DMA zone pfn 0x0-%#lx\n",
-				 mask,
-				 dma_to_pfn(dev, 0), dma_to_pfn(dev, mask) + 1,
-				 arm_dma_pfn_limit + 1);
+		if (!__dma_supported(dev, mask, true))
 			return 0;
-		}
 	}
 
 	return mask;
@@ -1032,28 +1042,7 @@ void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
  */
 int dma_supported(struct device *dev, u64 mask)
 {
-	unsigned long limit;
-
-	/*
-	 * If the mask allows for more memory than we can address,
-	 * and we actually have that much memory, then we must
-	 * indicate that DMA to this device is not supported.
-	 */
-	if (sizeof(mask) != sizeof(dma_addr_t) &&
-	    mask > (dma_addr_t)~0 &&
-	    dma_to_pfn(dev, ~0) > arm_dma_pfn_limit)
-		return 0;
-
-	/*
-	 * Translate the device's DMA mask to a PFN limit.  This
-	 * PFN number includes the page which we can DMA to.
-	 */
-	limit = dma_to_pfn(dev, mask);
-
-	if (limit < arm_dma_pfn_limit)
-		return 0;
-
-	return 1;
+	return __dma_supported(dev, mask, false);
 }
 EXPORT_SYMBOL(dma_supported);
......
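The dma-mapping rework folds two near-identical mask checks into one __dma_supported() helper: dma_supported() now calls it silently, while get_coherent_dma_mask() asks it to warn; the truncation test also gained the corrected comparison (dma_to_pfn(dev, ~0) < max_pfn, i.e. dma_addr_t cannot reach all of memory). A compressed userspace model of the shared-helper pattern; it omits the truncation branch, and the mapping, limits and names are all illustrative:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

static unsigned long max_pfn = 0x100000;	/* assumed: 4 GiB of RAM, 4 KiB pages */
static unsigned long dma_pfn_limit = 0x40000;	/* assumed: 1 GiB DMA zone */

/* illustrative 1:1 bus-address-to-pfn translation */
static unsigned long dma_to_pfn(uint64_t addr)
{
	return (unsigned long)(addr >> PAGE_SHIFT);
}

/* one decision, two callers: 'warn' selects the noisy or the silent flavour */
static bool dma_mask_ok(uint64_t mask, bool warn)
{
	unsigned long max_dma_pfn =
		max_pfn < dma_pfn_limit ? max_pfn : dma_pfn_limit;

	if (dma_to_pfn(mask) < max_dma_pfn) {
		if (warn)
			fprintf(stderr, "mask %#llx below DMA zone limit\n",
				(unsigned long long)mask);
		return false;
	}
	return true;
}

int main(void)
{
	printf("%d\n", dma_mask_ok(0xffffffffULL, false));	/* dma_supported() style: quiet */
	printf("%d\n", dma_mask_ok(0xffffffULL, true));		/* coherent-mask style: warns */
	return 0;
}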
@@ -229,7 +229,7 @@ void __init setup_dma_zone(const struct machine_desc *mdesc)
 #ifdef CONFIG_ZONE_DMA
 	if (mdesc->dma_zone_size) {
 		arm_dma_zone_size = mdesc->dma_zone_size;
-		arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
+		arm_dma_limit = __pv_phys_offset + arm_dma_zone_size - 1;
 	} else
 		arm_dma_limit = 0xffffffff;
 	arm_dma_pfn_limit = arm_dma_limit >> PAGE_SHIFT;
......