Commit 2a598436 authored by Jiaxun Yang, committed by Paul Burton

MIPS: Drop CPU_SUPPORTS_UNCACHED_ACCELERATED

CPU_SUPPORTS_UNCACHED_ACCELERATED was introduced back when the kernel could not
handle writecombine remapping well. Nowadays drivers can set up writecombine
remapping themselves (see the sketch below), so this option is no longer needed.
Signed-off-by: Jiaxun Yang <jiaxun.yang@flygoat.com>
Signed-off-by: Paul Burton <paulburton@kernel.org>
Cc: linux-mips@vger.kernel.org
Cc: chenhe@lemote.com
parent 75cac781
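
For illustration only (not part of the commit itself): a minimal sketch of the driver-side approach the commit message refers to. A driver asks for write-combined mappings on its own, via the standard ioremap_wc()/pgprot_writecombine()/io_remap_pfn_range() interfaces, instead of relying on the removed phys_mem_access_prot() hook to upgrade /dev/mem mappings. The device, base address and function names below are hypothetical; on MIPS, ioremap_wc() falls back to a plain uncached mapping when the CPU has no write-combining cache attribute.

/* Hypothetical framebuffer-style driver mapping its VRAM write-combined
 * by itself; addresses and names are made up for this sketch. */
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/mm.h>

#define MYFB_BASE	0x40000000UL	/* hypothetical VRAM physical base */
#define MYFB_SIZE	0x00800000UL	/* hypothetical VRAM size, 8 MiB */

static void __iomem *myfb_vram;

/* Kernel-side access: request a write-combined mapping; ioremap_wc()
 * degrades to an uncached mapping if the CPU cannot do better. */
static int myfb_map_vram(void)
{
	myfb_vram = ioremap_wc(MYFB_BASE, MYFB_SIZE);
	return myfb_vram ? 0 : -ENOMEM;
}

/* Userspace mmap(): mark the VMA write-combined before inserting the
 * physical pages, rather than expecting phys_mem_access_prot() to do it. */
static int myfb_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long pfn = MYFB_BASE >> PAGE_SHIFT;

	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	return io_remap_pfn_range(vma, vma->vm_start, pfn,
				  vma->vm_end - vma->vm_start,
				  vma->vm_page_prot);
}

With this pattern the VGA-aperture bookkeeping that find_vga_mem_init() used to do in the Loongson code (removed below) becomes unnecessary.
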
@@ -1973,7 +1973,6 @@ config SYS_HAS_CPU_LOONGSON2F
 	bool
 	select CPU_SUPPORTS_CPUFREQ
 	select CPU_SUPPORTS_ADDRWINCFG if 64BIT
-	select CPU_SUPPORTS_UNCACHED_ACCELERATED
 
 config SYS_HAS_CPU_LOONGSON1B
 	bool
@@ -2150,8 +2149,6 @@ config CPU_SUPPORTS_ADDRWINCFG
 config CPU_SUPPORTS_HUGEPAGES
 	bool
 	depends on !(32BIT && (ARCH_PHYS_ADDR_T_64BIT || EVA))
-config CPU_SUPPORTS_UNCACHED_ACCELERATED
-	bool
 config MIPS_PGD_C0_CONTEXT
 	bool
 	default y if 64BIT && (CPU_MIPSR2 || CPU_MIPSR6) && !CPU_XLP
@@ -643,17 +643,6 @@ static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
 
 #include <asm-generic/pgtable.h>
 
-/*
- * uncached accelerated TLB map for video memory access
- */
-#ifdef CONFIG_CPU_SUPPORTS_UNCACHED_ACCELERATED
-#define __HAVE_PHYS_MEM_ACCESS_PROT
-
-struct file;
-pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
-		unsigned long size, pgprot_t vma_prot);
-#endif
-
 /*
  * We provide our own get_unmapped area to cope with the virtual aliasing
  * constraints placed on us by the cache architecture.
@@ -60,61 +60,3 @@ int __uncached_access(struct file *file, unsigned long addr)
 		((addr >= LOONGSON_MMIO_MEM_START) &&
 		 (addr < LOONGSON_MMIO_MEM_END));
 }
-
-#ifdef CONFIG_CPU_SUPPORTS_UNCACHED_ACCELERATED
-
-#include <linux/pci.h>
-#include <linux/sched.h>
-#include <asm/current.h>
-
-static unsigned long uca_start, uca_end;
-
-pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
-			      unsigned long size, pgprot_t vma_prot)
-{
-	unsigned long offset = pfn << PAGE_SHIFT;
-	unsigned long end = offset + size;
-
-	if (__uncached_access(file, offset)) {
-		if (uca_start && (offset >= uca_start) &&
-		    (end <= uca_end))
-			return __pgprot((pgprot_val(vma_prot) &
-					 ~_CACHE_MASK) |
-					 _CACHE_UNCACHED_ACCELERATED);
-		else
-			return pgprot_noncached(vma_prot);
-	}
-	return vma_prot;
-}
-
-static int __init find_vga_mem_init(void)
-{
-	struct pci_dev *dev = 0;
-	struct resource *r;
-	int idx;
-
-	if (uca_start)
-		return 0;
-
-	for_each_pci_dev(dev) {
-		if ((dev->class >> 16) == PCI_BASE_CLASS_DISPLAY) {
-			for (idx = 0; idx < PCI_NUM_RESOURCES; idx++) {
-				r = &dev->resource[idx];
-				if (!r->start && r->end)
-					continue;
-				if (r->flags & IORESOURCE_IO)
-					continue;
-				if (r->flags & IORESOURCE_MEM) {
-					uca_start = r->start;
-					uca_end = r->end;
-					return 0;
-				}
-			}
-		}
-	}
-	return 0;
-}
-
-late_initcall(find_vga_mem_init);
-#endif /* !CONFIG_CPU_SUPPORTS_UNCACHED_ACCELERATED */