Commit c6188dff authored by Linus Torvalds

Merge tag 'csky-for-linus-5.6-rc3' of git://github.com/c-sky/csky-linux

Pull csky updates from Guo Ren:
 "Sorry, I missed 5.6-rc1 merge window, but in this pull request the
  most are the fixes and the rests are between fixes and features. The
  only outside modification is the MAINTAINERS file update with our
  mailing list.

   - cache flush implementation fixes

   - ftrace modify panic fix

   - CONFIG_SMP boot problem fix

   - fix pt_regs saving for atomic.S

   - fix fixaddr_init without highmem

   - fix stack protector support

   - fix compilation and use of the fake Tightly-Coupled Memory code

   - fix some typos and coding-convention issues"

* tag 'csky-for-linus-5.6-rc3' of git://github.com/c-sky/csky-linux: (23 commits)
  csky: Replace <linux/clk-provider.h> by <linux/of_clk.h>
  csky: Implement copy_thread_tls
  csky: Add PCI support
  csky: Minimize defconfig to support buildroot config.fragment
  csky: Add setup_initrd check code
  csky: Cleanup old Kconfig options
  arch/csky: fix some Kconfig typos
  csky: Fixup compile warning for three unimplemented syscalls
  csky: Remove unused cache implementation
  csky: Fixup ftrace modify panic
  csky: Add flush_icache_mm to defer flush icache all
  csky: Optimize abiv2 copy_to_user_page with VM_EXEC
  csky: Enable defer flush_dcache_page for abiv2 cpus (807/810/860)
  csky: Remove unnecessary flush_icache_* implementation
  csky: Support icache flush without specific instructions
  csky/Kconfig: Add Kconfig.platforms to support some drivers
  csky/smp: Fixup boot failed when CONFIG_SMP
  csky: Set regs->usp to kernel sp, when the exception is from kernel
  csky/mm: Fixup export invalid_pte_table symbol
  csky: Separate fixaddr_init from highmem
  ...
parents dca132a6 99db590b
...@@ -3649,6 +3649,7 @@ F: sound/pci/oxygen/ ...@@ -3649,6 +3649,7 @@ F: sound/pci/oxygen/
C-SKY ARCHITECTURE C-SKY ARCHITECTURE
M: Guo Ren <guoren@kernel.org> M: Guo Ren <guoren@kernel.org>
L: linux-csky@vger.kernel.org
T: git https://github.com/c-sky/csky-linux.git T: git https://github.com/c-sky/csky-linux.git
S: Supported S: Supported
F: arch/csky/ F: arch/csky/
......
...@@ -9,7 +9,6 @@ config CSKY ...@@ -9,7 +9,6 @@ config CSKY
select ARCH_USE_QUEUED_RWLOCKS if NR_CPUS>2 select ARCH_USE_QUEUED_RWLOCKS if NR_CPUS>2
select COMMON_CLK select COMMON_CLK
select CLKSRC_MMIO select CLKSRC_MMIO
select CLKSRC_OF
select CSKY_MPINTC if CPU_CK860 select CSKY_MPINTC if CPU_CK860
select CSKY_MP_TIMER if CPU_CK860 select CSKY_MP_TIMER if CPU_CK860
select CSKY_APB_INTC select CSKY_APB_INTC
...@@ -37,6 +36,7 @@ config CSKY ...@@ -37,6 +36,7 @@ config CSKY
select GX6605S_TIMER if CPU_CK610 select GX6605S_TIMER if CPU_CK610
select HAVE_ARCH_TRACEHOOK select HAVE_ARCH_TRACEHOOK
select HAVE_ARCH_AUDITSYSCALL select HAVE_ARCH_AUDITSYSCALL
select HAVE_COPY_THREAD_TLS
select HAVE_DYNAMIC_FTRACE select HAVE_DYNAMIC_FTRACE
select HAVE_FUNCTION_TRACER select HAVE_FUNCTION_TRACER
select HAVE_FUNCTION_GRAPH_TRACER select HAVE_FUNCTION_GRAPH_TRACER
...@@ -47,8 +47,8 @@ config CSKY ...@@ -47,8 +47,8 @@ config CSKY
select HAVE_PERF_EVENTS select HAVE_PERF_EVENTS
select HAVE_PERF_REGS select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP select HAVE_PERF_USER_STACK_DUMP
select HAVE_DMA_API_DEBUG
select HAVE_DMA_CONTIGUOUS select HAVE_DMA_CONTIGUOUS
select HAVE_STACKPROTECTOR
select HAVE_SYSCALL_TRACEPOINTS select HAVE_SYSCALL_TRACEPOINTS
select MAY_HAVE_SPARSE_IRQ select MAY_HAVE_SPARSE_IRQ
select MODULES_USE_ELF_RELA if MODULES select MODULES_USE_ELF_RELA if MODULES
...@@ -59,6 +59,11 @@ config CSKY ...@@ -59,6 +59,11 @@ config CSKY
select TIMER_OF select TIMER_OF
select USB_ARCH_HAS_EHCI select USB_ARCH_HAS_EHCI
select USB_ARCH_HAS_OHCI select USB_ARCH_HAS_OHCI
select GENERIC_PCI_IOMAP
select HAVE_PCI
select PCI_DOMAINS_GENERIC if PCI
select PCI_SYSCALL if PCI
select PCI_MSI if PCI
config CPU_HAS_CACHEV2 config CPU_HAS_CACHEV2
bool bool
...@@ -75,7 +80,7 @@ config CPU_HAS_TLBI ...@@ -75,7 +80,7 @@ config CPU_HAS_TLBI
config CPU_HAS_LDSTEX config CPU_HAS_LDSTEX
bool bool
help help
For SMP, CPU needs "ldex&stex" instrcutions to atomic operations. For SMP, CPU needs "ldex&stex" instructions for atomic operations.
config CPU_NEED_TLBSYNC config CPU_NEED_TLBSYNC
bool bool
...@@ -188,6 +193,40 @@ config CPU_PM_STOP ...@@ -188,6 +193,40 @@ config CPU_PM_STOP
bool "stop" bool "stop"
endchoice endchoice
menuconfig HAVE_TCM
bool "Tightly-Coupled/Sram Memory"
select GENERIC_ALLOCATOR
help
The implementation is not only used for TCM (Tightly-Coupled Memory)
but also for SRAM on the SoC bus. It follows the existing Linux TCM
software interface, so that old TCM application code can be
re-used directly.
if HAVE_TCM
config ITCM_RAM_BASE
hex "ITCM ram base"
default 0xffffffff
config ITCM_NR_PAGES
int "Page count of ITCM size: NR*4KB"
range 1 256
default 32
config HAVE_DTCM
bool "DTCM Support"
config DTCM_RAM_BASE
hex "DTCM ram base"
depends on HAVE_DTCM
default 0xffffffff
config DTCM_NR_PAGES
int "Page count of DTCM size: NR*4KB"
depends on HAVE_DTCM
range 1 256
default 32
endif
config CPU_HAS_VDSP config CPU_HAS_VDSP
bool "CPU has VDSP coprocessor" bool "CPU has VDSP coprocessor"
depends on CPU_HAS_FPU && CPU_HAS_FPUV2 depends on CPU_HAS_FPU && CPU_HAS_FPUV2
...@@ -196,6 +235,10 @@ config CPU_HAS_FPU ...@@ -196,6 +235,10 @@ config CPU_HAS_FPU
bool "CPU has FPU coprocessor" bool "CPU has FPU coprocessor"
depends on CPU_CK807 || CPU_CK810 || CPU_CK860 depends on CPU_CK807 || CPU_CK810 || CPU_CK860
config CPU_HAS_ICACHE_INS
bool "CPU has Icache invalidate instructions"
depends on CPU_HAS_CACHEV2
config CPU_HAS_TEE config CPU_HAS_TEE
bool "CPU has Trusted Execution Environment" bool "CPU has Trusted Execution Environment"
depends on CPU_CK810 depends on CPU_CK810
...@@ -235,4 +278,6 @@ config HOTPLUG_CPU ...@@ -235,4 +278,6 @@ config HOTPLUG_CPU
Say N if you want to disable CPU hotplug. Say N if you want to disable CPU hotplug.
endmenu endmenu
source "arch/csky/Kconfig.platforms"
source "kernel/Kconfig.hz" source "kernel/Kconfig.hz"
menu "Platform drivers selection"
config ARCH_CSKY_DW_APB_ICTL
bool "Select dw-apb interrupt controller"
select DW_APB_ICTL
default y
help
This enables support for snps dw-apb-ictl
endmenu
...@@ -48,9 +48,8 @@ extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, u ...@@ -48,9 +48,8 @@ extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, u
#define flush_icache_page(vma, page) do {} while (0); #define flush_icache_page(vma, page) do {} while (0);
#define flush_icache_range(start, end) cache_wbinv_range(start, end) #define flush_icache_range(start, end) cache_wbinv_range(start, end)
#define flush_icache_mm_range(mm, start, end) cache_wbinv_range(start, end)
#define flush_icache_user_range(vma,page,addr,len) \ #define flush_icache_deferred(mm) do {} while (0);
flush_dcache_page(page)
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \ #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
do { \ do { \
......
...@@ -16,14 +16,16 @@ ...@@ -16,14 +16,16 @@
#define LSAVE_A4 40 #define LSAVE_A4 40
#define LSAVE_A5 44 #define LSAVE_A5 44
#define usp ss1
.macro USPTOKSP .macro USPTOKSP
mtcr sp, ss1 mtcr sp, usp
mfcr sp, ss0 mfcr sp, ss0
.endm .endm
.macro KSPTOUSP .macro KSPTOUSP
mtcr sp, ss0 mtcr sp, ss0
mfcr sp, ss1 mfcr sp, usp
.endm .endm
.macro SAVE_ALL epc_inc .macro SAVE_ALL epc_inc
...@@ -45,7 +47,13 @@ ...@@ -45,7 +47,13 @@
add lr, r13 add lr, r13
stw lr, (sp, 8) stw lr, (sp, 8)
mov lr, sp
addi lr, 32
addi lr, 32
addi lr, 16
bt 2f
mfcr lr, ss1 mfcr lr, ss1
2:
stw lr, (sp, 16) stw lr, (sp, 16)
stw a0, (sp, 20) stw a0, (sp, 20)
...@@ -79,9 +87,10 @@ ...@@ -79,9 +87,10 @@
ldw a0, (sp, 12) ldw a0, (sp, 12)
mtcr a0, epsr mtcr a0, epsr
btsti a0, 31 btsti a0, 31
bt 1f
ldw a0, (sp, 16) ldw a0, (sp, 16)
mtcr a0, ss1 mtcr a0, ss1
1:
ldw a0, (sp, 24) ldw a0, (sp, 24)
ldw a1, (sp, 28) ldw a1, (sp, 28)
ldw a2, (sp, 32) ldw a2, (sp, 32)
...@@ -102,9 +111,9 @@ ...@@ -102,9 +111,9 @@
addi sp, 32 addi sp, 32
addi sp, 8 addi sp, 8
bt 1f bt 2f
KSPTOUSP KSPTOUSP
1: 2:
rte rte
.endm .endm
......
...@@ -6,46 +6,80 @@ ...@@ -6,46 +6,80 @@
#include <linux/mm.h> #include <linux/mm.h>
#include <asm/cache.h> #include <asm/cache.h>
void flush_icache_page(struct vm_area_struct *vma, struct page *page) void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
pte_t *pte)
{ {
unsigned long start; unsigned long addr;
struct page *page;
start = (unsigned long) kmap_atomic(page); page = pfn_to_page(pte_pfn(*pte));
if (page == ZERO_PAGE(0))
return;
cache_wbinv_range(start, start + PAGE_SIZE); if (test_and_set_bit(PG_dcache_clean, &page->flags))
return;
kunmap_atomic((void *)start); addr = (unsigned long) kmap_atomic(page);
}
void flush_icache_user_range(struct vm_area_struct *vma, struct page *page, dcache_wb_range(addr, addr + PAGE_SIZE);
unsigned long vaddr, int len)
{
unsigned long kaddr;
kaddr = (unsigned long) kmap_atomic(page) + (vaddr & ~PAGE_MASK); if (vma->vm_flags & VM_EXEC)
icache_inv_range(addr, addr + PAGE_SIZE);
kunmap_atomic((void *) addr);
}
cache_wbinv_range(kaddr, kaddr + len); void flush_icache_deferred(struct mm_struct *mm)
{
unsigned int cpu = smp_processor_id();
cpumask_t *mask = &mm->context.icache_stale_mask;
kunmap_atomic((void *)kaddr); if (cpumask_test_cpu(cpu, mask)) {
cpumask_clear_cpu(cpu, mask);
/*
* Ensure the remote hart's writes are visible to this hart.
* This pairs with a barrier in flush_icache_mm.
*/
smp_mb();
local_icache_inv_all(NULL);
}
} }
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, void flush_icache_mm_range(struct mm_struct *mm,
pte_t *pte) unsigned long start, unsigned long end)
{ {
unsigned long addr, pfn; unsigned int cpu;
struct page *page; cpumask_t others, *mask;
pfn = pte_pfn(*pte); preempt_disable();
if (unlikely(!pfn_valid(pfn)))
return;
page = pfn_to_page(pfn); #ifdef CONFIG_CPU_HAS_ICACHE_INS
if (page == ZERO_PAGE(0)) if (mm == current->mm) {
icache_inv_range(start, end);
preempt_enable();
return; return;
}
#endif
addr = (unsigned long) kmap_atomic(page); /* Mark every hart's icache as needing a flush for this MM. */
mask = &mm->context.icache_stale_mask;
cpumask_setall(mask);
cache_wbinv_range(addr, addr + PAGE_SIZE); /* Flush this hart's I$ now, and mark it as flushed. */
cpu = smp_processor_id();
cpumask_clear_cpu(cpu, mask);
local_icache_inv_all(NULL);
kunmap_atomic((void *) addr); /*
* Flush the I$ of other harts concurrently executing, and mark them as
* flushed.
*/
cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
if (mm != current->active_mm || !cpumask_empty(&others)) {
on_each_cpu_mask(&others, local_icache_inv_all, NULL, 1);
cpumask_clear(mask);
}
preempt_enable();
} }
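The deferred scheme above trades a broadcast invalidate for per-mm bookkeeping: flush_icache_mm_range() marks every CPU's icache as stale for the mm, only IPIs CPUs that are executing it right now, and leaves the rest to flush_icache_deferred() at the next switch_mm(). A rough timeline (illustrative only; CPU numbers are arbitrary):

/*
 * CPU0: the only CPU currently running task T; it writes code and calls
 *       flush_icache_mm_range(T->mm, start, end):
 *         - cpumask_setall(&T->mm->context.icache_stale_mask)
 *         - invalidates its own I-cache, clears its own stale bit
 *         - no other CPU is in mm_cpumask(T->mm), so no IPI is sent and
 *           the stale mask stays set for everyone else
 * CPU1: later schedules T; switch_mm() calls flush_icache_deferred():
 *         - finds its bit set in icache_stale_mask
 *         - smp_mb(); local_icache_inv_all(NULL); clears its bit
 * If other CPUs had been running T at flush time, they are flushed
 * immediately via on_each_cpu_mask() and the stale mask is cleared.
 * CPUs that never schedule T never pay for the invalidate.
 */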
...@@ -13,24 +13,27 @@ ...@@ -13,24 +13,27 @@
#define flush_cache_all() do { } while (0) #define flush_cache_all() do { } while (0)
#define flush_cache_mm(mm) do { } while (0) #define flush_cache_mm(mm) do { } while (0)
#define flush_cache_dup_mm(mm) do { } while (0) #define flush_cache_dup_mm(mm) do { } while (0)
#define flush_cache_range(vma, start, end) do { } while (0)
#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
#define flush_cache_range(vma, start, end) \ #define PG_dcache_clean PG_arch_1
do { \
if (vma->vm_flags & VM_EXEC) \ #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
icache_inv_all(); \ static inline void flush_dcache_page(struct page *page)
} while (0) {
if (test_bit(PG_dcache_clean, &page->flags))
clear_bit(PG_dcache_clean, &page->flags);
}
#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
#define flush_dcache_page(page) do { } while (0)
#define flush_dcache_mmap_lock(mapping) do { } while (0) #define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0) #define flush_dcache_mmap_unlock(mapping) do { } while (0)
#define flush_icache_page(vma, page) do { } while (0)
#define flush_icache_range(start, end) cache_wbinv_range(start, end) #define flush_icache_range(start, end) cache_wbinv_range(start, end)
void flush_icache_page(struct vm_area_struct *vma, struct page *page); void flush_icache_mm_range(struct mm_struct *mm,
void flush_icache_user_range(struct vm_area_struct *vma, struct page *page, unsigned long start, unsigned long end);
unsigned long vaddr, int len); void flush_icache_deferred(struct mm_struct *mm);
#define flush_cache_vmap(start, end) do { } while (0) #define flush_cache_vmap(start, end) do { } while (0)
#define flush_cache_vunmap(start, end) do { } while (0) #define flush_cache_vunmap(start, end) do { } while (0)
...@@ -38,7 +41,13 @@ void flush_icache_user_range(struct vm_area_struct *vma, struct page *page, ...@@ -38,7 +41,13 @@ void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \ #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \ do { \
memcpy(dst, src, len); \ memcpy(dst, src, len); \
cache_wbinv_range((unsigned long)dst, (unsigned long)dst + len); \ if (vma->vm_flags & VM_EXEC) { \
dcache_wb_range((unsigned long)dst, \
(unsigned long)dst + len); \
flush_icache_mm_range(current->mm, \
(unsigned long)dst, \
(unsigned long)dst + len); \
} \
} while (0) } while (0)
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \ #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
memcpy(dst, src, len) memcpy(dst, src, len)
......
...@@ -31,7 +31,13 @@ ...@@ -31,7 +31,13 @@
mfcr lr, epsr mfcr lr, epsr
stw lr, (sp, 12) stw lr, (sp, 12)
btsti lr, 31
bf 1f
addi lr, sp, 152
br 2f
1:
mfcr lr, usp mfcr lr, usp
2:
stw lr, (sp, 16) stw lr, (sp, 16)
stw a0, (sp, 20) stw a0, (sp, 20)
...@@ -64,8 +70,10 @@ ...@@ -64,8 +70,10 @@
mtcr a0, epc mtcr a0, epc
ldw a0, (sp, 12) ldw a0, (sp, 12)
mtcr a0, epsr mtcr a0, epsr
btsti a0, 31
ldw a0, (sp, 16) ldw a0, (sp, 16)
mtcr a0, usp mtcr a0, usp
mtcr a0, ss0
#ifdef CONFIG_CPU_HAS_HILO #ifdef CONFIG_CPU_HAS_HILO
ldw a0, (sp, 140) ldw a0, (sp, 140)
...@@ -86,6 +94,9 @@ ...@@ -86,6 +94,9 @@
addi sp, 40 addi sp, 40
ldm r16-r30, (sp) ldm r16-r30, (sp)
addi sp, 72 addi sp, 72
bf 1f
mfcr sp, ss0
1:
rte rte
.endm .endm
......
...@@ -10,9 +10,6 @@ CONFIG_BSD_PROCESS_ACCT=y ...@@ -10,9 +10,6 @@ CONFIG_BSD_PROCESS_ACCT=y
CONFIG_BSD_PROCESS_ACCT_V3=y CONFIG_BSD_PROCESS_ACCT_V3=y
CONFIG_MODULES=y CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y CONFIG_MODULE_UNLOAD=y
CONFIG_DEFAULT_DEADLINE=y
CONFIG_CPU_CK807=y
CONFIG_CPU_HAS_FPU=y
CONFIG_NET=y CONFIG_NET=y
CONFIG_PACKET=y CONFIG_PACKET=y
CONFIG_UNIX=y CONFIG_UNIX=y
...@@ -27,10 +24,7 @@ CONFIG_SERIAL_NONSTANDARD=y ...@@ -27,10 +24,7 @@ CONFIG_SERIAL_NONSTANDARD=y
CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_OF_PLATFORM=y CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_TTY_PRINTK=y
# CONFIG_VGA_CONSOLE is not set # CONFIG_VGA_CONSOLE is not set
CONFIG_CSKY_MPTIMER=y
CONFIG_GX6605S_TIMER=y
CONFIG_PM_DEVFREQ=y CONFIG_PM_DEVFREQ=y
CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y
CONFIG_DEVFREQ_GOV_PERFORMANCE=y CONFIG_DEVFREQ_GOV_PERFORMANCE=y
...@@ -56,6 +50,4 @@ CONFIG_CRAMFS=y ...@@ -56,6 +50,4 @@ CONFIG_CRAMFS=y
CONFIG_ROMFS_FS=y CONFIG_ROMFS_FS=y
CONFIG_NFS_FS=y CONFIG_NFS_FS=y
CONFIG_PRINTK_TIME=y CONFIG_PRINTK_TIME=y
CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_FS=y
CONFIG_MAGIC_SYSRQ=y CONFIG_MAGIC_SYSRQ=y
...@@ -28,7 +28,6 @@ generic-y += local64.h ...@@ -28,7 +28,6 @@ generic-y += local64.h
generic-y += mm-arch-hooks.h generic-y += mm-arch-hooks.h
generic-y += mmiowb.h generic-y += mmiowb.h
generic-y += module.h generic-y += module.h
generic-y += pci.h
generic-y += percpu.h generic-y += percpu.h
generic-y += preempt.h generic-y += preempt.h
generic-y += qrwlock.h generic-y += qrwlock.h
......
...@@ -16,6 +16,7 @@ void dcache_wb_line(unsigned long start); ...@@ -16,6 +16,7 @@ void dcache_wb_line(unsigned long start);
void icache_inv_range(unsigned long start, unsigned long end); void icache_inv_range(unsigned long start, unsigned long end);
void icache_inv_all(void); void icache_inv_all(void);
void local_icache_inv_all(void *priv);
void dcache_wb_range(unsigned long start, unsigned long end); void dcache_wb_range(unsigned long start, unsigned long end);
void dcache_wbinv_all(void); void dcache_wbinv_all(void);
......
...@@ -4,6 +4,7 @@ ...@@ -4,6 +4,7 @@
#ifndef __ASM_CSKY_CACHEFLUSH_H #ifndef __ASM_CSKY_CACHEFLUSH_H
#define __ASM_CSKY_CACHEFLUSH_H #define __ASM_CSKY_CACHEFLUSH_H
#include <linux/mm.h>
#include <abi/cacheflush.h> #include <abi/cacheflush.h>
#endif /* __ASM_CSKY_CACHEFLUSH_H */ #endif /* __ASM_CSKY_CACHEFLUSH_H */
...@@ -5,12 +5,16 @@ ...@@ -5,12 +5,16 @@
#define __ASM_CSKY_FIXMAP_H #define __ASM_CSKY_FIXMAP_H
#include <asm/page.h> #include <asm/page.h>
#include <asm/memory.h>
#ifdef CONFIG_HIGHMEM #ifdef CONFIG_HIGHMEM
#include <linux/threads.h> #include <linux/threads.h>
#include <asm/kmap_types.h> #include <asm/kmap_types.h>
#endif #endif
enum fixed_addresses { enum fixed_addresses {
#ifdef CONFIG_HAVE_TCM
FIX_TCM = TCM_NR_PAGES,
#endif
#ifdef CONFIG_HIGHMEM #ifdef CONFIG_HIGHMEM
FIX_KMAP_BEGIN, FIX_KMAP_BEGIN,
FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_TYPE_NR * NR_CPUS) - 1, FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_TYPE_NR * NR_CPUS) - 1,
...@@ -18,10 +22,13 @@ enum fixed_addresses { ...@@ -18,10 +22,13 @@ enum fixed_addresses {
__end_of_fixed_addresses __end_of_fixed_addresses
}; };
#define FIXADDR_TOP 0xffffc000
#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) #define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE) #define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
#include <asm-generic/fixmap.h> #include <asm-generic/fixmap.h>
extern void fixrange_init(unsigned long start, unsigned long end,
pgd_t *pgd_base);
extern void __init fixaddr_init(void);
#endif /* __ASM_CSKY_FIXMAP_H */ #endif /* __ASM_CSKY_FIXMAP_H */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_CSKY_MEMORY_H
#define __ASM_CSKY_MEMORY_H
#include <linux/compiler.h>
#include <linux/const.h>
#include <linux/types.h>
#include <linux/sizes.h>
#define FIXADDR_TOP _AC(0xffffc000, UL)
#define PKMAP_BASE _AC(0xff800000, UL)
#define VMALLOC_START _AC(0xc0008000, UL)
#define VMALLOC_END (PKMAP_BASE - (PAGE_SIZE * 2))
#ifdef CONFIG_HAVE_TCM
#ifdef CONFIG_HAVE_DTCM
#define TCM_NR_PAGES (CONFIG_ITCM_NR_PAGES + CONFIG_DTCM_NR_PAGES)
#else
#define TCM_NR_PAGES (CONFIG_ITCM_NR_PAGES)
#endif
#define FIXADDR_TCM _AC(FIXADDR_TOP - (TCM_NR_PAGES * PAGE_SIZE), UL)
#endif
#endif
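For orientation, a worked example of the fixmap arithmetic above, assuming the Kconfig defaults introduced earlier in this series (4 KiB pages, CONFIG_ITCM_NR_PAGES=32, no DTCM):

/*
 * TCM_NR_PAGES = CONFIG_ITCM_NR_PAGES = 32
 * FIXADDR_TCM  = FIXADDR_TOP - TCM_NR_PAGES * PAGE_SIZE
 *              = 0xffffc000 - 32 * 0x1000
 *              = 0xfffdc000
 * so the TCM fixmap window spans 0xfffdc000..0xffffbfff, immediately
 * below FIXADDR_TOP, with PKMAP_BASE (0xff800000) further down.
 */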
...@@ -7,6 +7,7 @@ ...@@ -7,6 +7,7 @@
typedef struct { typedef struct {
atomic64_t asid; atomic64_t asid;
void *vdso; void *vdso;
cpumask_t icache_stale_mask;
} mm_context_t; } mm_context_t;
#endif /* __ASM_CSKY_MMU_H */ #endif /* __ASM_CSKY_MMU_H */
...@@ -43,5 +43,7 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next, ...@@ -43,5 +43,7 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
TLBMISS_HANDLER_SETUP_PGD(next->pgd); TLBMISS_HANDLER_SETUP_PGD(next->pgd);
write_mmu_entryhi(next->context.asid.counter); write_mmu_entryhi(next->context.asid.counter);
flush_icache_deferred(next);
} }
#endif /* __ASM_CSKY_MMU_CONTEXT_H */ #endif /* __ASM_CSKY_MMU_CONTEXT_H */
/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef __ASM_CSKY_PCI_H
#define __ASM_CSKY_PCI_H
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <asm/io.h>
#define PCIBIOS_MIN_IO 0
#define PCIBIOS_MIN_MEM 0
/* C-SKY shim does not initialize PCI bus */
#define pcibios_assign_all_busses() 1
extern int isa_dma_bridge_buggy;
#ifdef CONFIG_PCI
static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
{
/* no legacy IRQ on csky */
return -ENODEV;
}
static inline int pci_proc_domain(struct pci_bus *bus)
{
/* always show the domain in /proc */
return 1;
}
#endif /* CONFIG_PCI */
#endif /* __ASM_CSKY_PCI_H */
...@@ -5,6 +5,7 @@ ...@@ -5,6 +5,7 @@
#define __ASM_CSKY_PGTABLE_H #define __ASM_CSKY_PGTABLE_H
#include <asm/fixmap.h> #include <asm/fixmap.h>
#include <asm/memory.h>
#include <asm/addrspace.h> #include <asm/addrspace.h>
#include <abi/pgtable-bits.h> #include <abi/pgtable-bits.h>
#include <asm-generic/pgtable-nopmd.h> #include <asm-generic/pgtable-nopmd.h>
...@@ -16,11 +17,6 @@ ...@@ -16,11 +17,6 @@
#define USER_PTRS_PER_PGD (0x80000000UL/PGDIR_SIZE) #define USER_PTRS_PER_PGD (0x80000000UL/PGDIR_SIZE)
#define FIRST_USER_ADDRESS 0UL #define FIRST_USER_ADDRESS 0UL
#define PKMAP_BASE (0xff800000)
#define VMALLOC_START (0xc0008000)
#define VMALLOC_END (PKMAP_BASE - 2*PAGE_SIZE)
/* /*
* C-SKY is two-level paging structure: * C-SKY is two-level paging structure:
*/ */
......
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_STACKPROTECTOR_H
#define _ASM_STACKPROTECTOR_H 1
#include <linux/random.h>
#include <linux/version.h>
extern unsigned long __stack_chk_guard;
/*
* Initialize the stackprotector canary value.
*
* NOTE: this must only be called from functions that never return,
* and it must always be inlined.
*/
static __always_inline void boot_init_stack_canary(void)
{
unsigned long canary;
/* Try to get a semi random initial value. */
get_random_bytes(&canary, sizeof(canary));
canary ^= LINUX_VERSION_CODE;
canary &= CANARY_MASK;
current->stack_canary = canary;
__stack_chk_guard = current->stack_canary;
}
#endif /* _ASM_STACKPROTECTOR_H */
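The guard exported here is consumed by compiler-generated instrumentation rather than by any explicit kernel caller. A conceptual sketch, not kernel code, of roughly what -fstack-protector emits for a protected function; __stack_chk_guard and __stack_chk_fail are the real symbols, everything else is illustrative:

extern unsigned long __stack_chk_guard;
void __stack_chk_fail(void);

int instrumented_function(void)
{
        unsigned long canary = __stack_chk_guard;   /* prologue: stash guard */
        int ret = 0;

        /* ... body with on-stack buffers ... */

        if (canary != __stack_chk_guard)            /* epilogue: verify guard */
                __stack_chk_fail();
        return ret;
}

This is also why boot_init_stack_canary() must be inlined into a function that never returns: rewriting the guard while an instrumented caller is still live would make that caller's epilogue check fail.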
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_CSKY_TCM_H
#define __ASM_CSKY_TCM_H
#ifndef CONFIG_HAVE_TCM
#error "You should not be including tcm.h unless you have a TCM!"
#endif
#include <linux/compiler.h>
/* Tag variables with this */
#define __tcmdata __section(.tcm.data)
/* Tag constants with this */
#define __tcmconst __section(.tcm.rodata)
/* Tag functions inside TCM called from outside TCM with this */
#define __tcmfunc __section(.tcm.text) noinline
/* Tag function inside TCM called from inside TCM with this */
#define __tcmlocalfunc __section(.tcm.text)
void *tcm_alloc(size_t len);
void tcm_free(void *addr, size_t len);
#endif
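A minimal sketch of how client code might use this interface; it mirrors the existing ARM TCM API that the Kconfig help text refers to, and the names, the 256-byte size, and the example_init/example_exit hooks are purely illustrative:

#include <linux/errno.h>
#include <asm/tcm.h>

static int __tcmdata hot_counter;          /* placed in .tcm.data */

int __tcmfunc hot_path_step(int x)         /* placed in .tcm.text */
{
        return x + hot_counter++;
}

static void *scratch;

static int example_init(void)
{
        scratch = tcm_alloc(256);          /* carve 256 bytes from the TCM pool */
        if (!scratch)
                return -ENOMEM;
        return 0;
}

static void example_exit(void)
{
        tcm_free(scratch, 256);
        scratch = NULL;
}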
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
#define __ARCH_WANT_STAT64
#define __ARCH_WANT_NEW_STAT
#define __ARCH_WANT_SYS_CLONE #define __ARCH_WANT_SYS_CLONE
#define __ARCH_WANT_SYS_CLONE3
#define __ARCH_WANT_SET_GET_RLIMIT #define __ARCH_WANT_SET_GET_RLIMIT
#define __ARCH_WANT_TIME32_SYSCALLS #define __ARCH_WANT_TIME32_SYSCALLS
#include <asm-generic/unistd.h> #include <asm-generic/unistd.h>
......
...@@ -17,10 +17,12 @@ ENTRY(csky_cmpxchg) ...@@ -17,10 +17,12 @@ ENTRY(csky_cmpxchg)
mfcr a3, epc mfcr a3, epc
addi a3, TRAP0_SIZE addi a3, TRAP0_SIZE
subi sp, 8 subi sp, 16
stw a3, (sp, 0) stw a3, (sp, 0)
mfcr a3, epsr mfcr a3, epsr
stw a3, (sp, 4) stw a3, (sp, 4)
mfcr a3, usp
stw a3, (sp, 8)
psrset ee psrset ee
#ifdef CONFIG_CPU_HAS_LDSTEX #ifdef CONFIG_CPU_HAS_LDSTEX
...@@ -47,7 +49,9 @@ ENTRY(csky_cmpxchg) ...@@ -47,7 +49,9 @@ ENTRY(csky_cmpxchg)
mtcr a3, epc mtcr a3, epc
ldw a3, (sp, 4) ldw a3, (sp, 4)
mtcr a3, epsr mtcr a3, epsr
addi sp, 8 ldw a3, (sp, 8)
mtcr a3, usp
addi sp, 16
KSPTOUSP KSPTOUSP
rte rte
END(csky_cmpxchg) END(csky_cmpxchg)
......
...@@ -16,6 +16,12 @@ ...@@ -16,6 +16,12 @@
struct cpuinfo_csky cpu_data[NR_CPUS]; struct cpuinfo_csky cpu_data[NR_CPUS];
#ifdef CONFIG_STACKPROTECTOR
#include <linux/stackprotector.h>
unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
#endif
asmlinkage void ret_from_fork(void); asmlinkage void ret_from_fork(void);
asmlinkage void ret_from_kernel_thread(void); asmlinkage void ret_from_kernel_thread(void);
...@@ -34,10 +40,11 @@ unsigned long thread_saved_pc(struct task_struct *tsk) ...@@ -34,10 +40,11 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
return sw->r15; return sw->r15;
} }
int copy_thread(unsigned long clone_flags, int copy_thread_tls(unsigned long clone_flags,
unsigned long usp, unsigned long usp,
unsigned long kthread_arg, unsigned long kthread_arg,
struct task_struct *p) struct task_struct *p,
unsigned long tls)
{ {
struct switch_stack *childstack; struct switch_stack *childstack;
struct pt_regs *childregs = task_pt_regs(p); struct pt_regs *childregs = task_pt_regs(p);
...@@ -64,7 +71,7 @@ int copy_thread(unsigned long clone_flags, ...@@ -64,7 +71,7 @@ int copy_thread(unsigned long clone_flags,
childregs->usp = usp; childregs->usp = usp;
if (clone_flags & CLONE_SETTLS) if (clone_flags & CLONE_SETTLS)
task_thread_info(p)->tp_value = childregs->tls task_thread_info(p)->tp_value = childregs->tls
= childregs->regs[0]; = tls;
childregs->a0 = 0; childregs->a0 = 0;
childstack->r15 = (unsigned long) ret_from_fork; childstack->r15 = (unsigned long) ret_from_fork;
......
...@@ -47,9 +47,6 @@ static void __init csky_memblock_init(void) ...@@ -47,9 +47,6 @@ static void __init csky_memblock_init(void)
signed long size; signed long size;
memblock_reserve(__pa(_stext), _end - _stext); memblock_reserve(__pa(_stext), _end - _stext);
#ifdef CONFIG_BLK_DEV_INITRD
memblock_reserve(__pa(initrd_start), initrd_end - initrd_start);
#endif
early_init_fdt_reserve_self(); early_init_fdt_reserve_self();
early_init_fdt_scan_reserved_mem(); early_init_fdt_scan_reserved_mem();
...@@ -133,6 +130,8 @@ void __init setup_arch(char **cmdline_p) ...@@ -133,6 +130,8 @@ void __init setup_arch(char **cmdline_p)
sparse_init(); sparse_init();
fixaddr_init();
#ifdef CONFIG_HIGHMEM #ifdef CONFIG_HIGHMEM
kmap_init(); kmap_init();
#endif #endif
......
...@@ -120,7 +120,7 @@ void __init setup_smp_ipi(void) ...@@ -120,7 +120,7 @@ void __init setup_smp_ipi(void)
int rc; int rc;
if (ipi_irq == 0) if (ipi_irq == 0)
panic("%s IRQ mapping failed\n", __func__); return;
rc = request_percpu_irq(ipi_irq, handle_ipi, "IPI Interrupt", rc = request_percpu_irq(ipi_irq, handle_ipi, "IPI Interrupt",
&ipi_dummy_dev); &ipi_dummy_dev);
......
// SPDX-License-Identifier: GPL-2.0 // SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
#include <linux/clk-provider.h>
#include <linux/clocksource.h> #include <linux/clocksource.h>
#include <linux/of_clk.h>
void __init time_init(void) void __init time_init(void)
{ {
......
...@@ -2,6 +2,7 @@ ...@@ -2,6 +2,7 @@
#include <asm/vmlinux.lds.h> #include <asm/vmlinux.lds.h>
#include <asm/page.h> #include <asm/page.h>
#include <asm/memory.h>
OUTPUT_ARCH(csky) OUTPUT_ARCH(csky)
ENTRY(_start) ENTRY(_start)
...@@ -53,6 +54,54 @@ SECTIONS ...@@ -53,6 +54,54 @@ SECTIONS
RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE) RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
_edata = .; _edata = .;
#ifdef CONFIG_HAVE_TCM
.tcm_start : {
. = ALIGN(PAGE_SIZE);
__tcm_start = .;
}
.text_data_tcm FIXADDR_TCM : AT(__tcm_start)
{
. = ALIGN(4);
__stcm_text_data = .;
*(.tcm.text)
*(.tcm.rodata)
#ifndef CONFIG_HAVE_DTCM
*(.tcm.data)
#endif
. = ALIGN(4);
__etcm_text_data = .;
}
. = ADDR(.tcm_start) + SIZEOF(.tcm_start) + SIZEOF(.text_data_tcm);
#ifdef CONFIG_HAVE_DTCM
#define ITCM_SIZE CONFIG_ITCM_NR_PAGES * PAGE_SIZE
.dtcm_start : {
__dtcm_start = .;
}
.data_tcm FIXADDR_TCM + ITCM_SIZE : AT(__dtcm_start)
{
. = ALIGN(4);
__stcm_data = .;
*(.tcm.data)
. = ALIGN(4);
__etcm_data = .;
}
. = ADDR(.dtcm_start) + SIZEOF(.data_tcm);
.tcm_end : AT(ADDR(.dtcm_start) + SIZEOF(.data_tcm)) {
#else
.tcm_end : AT(ADDR(.tcm_start) + SIZEOF(.text_data_tcm)) {
#endif
. = ALIGN(PAGE_SIZE);
__tcm_end = .;
}
#endif
EXCEPTION_TABLE(L1_CACHE_BYTES) EXCEPTION_TABLE(L1_CACHE_BYTES)
BSS_SECTION(L1_CACHE_BYTES, PAGE_SIZE, L1_CACHE_BYTES) BSS_SECTION(L1_CACHE_BYTES, PAGE_SIZE, L1_CACHE_BYTES)
VBR_BASE VBR_BASE
......
# SPDX-License-Identifier: GPL-2.0-only # SPDX-License-Identifier: GPL-2.0-only
ifeq ($(CONFIG_CPU_HAS_CACHEV2),y) ifeq ($(CONFIG_CPU_HAS_CACHEV2),y)
obj-y += cachev2.o obj-y += cachev2.o
CFLAGS_REMOVE_cachev2.o = $(CC_FLAGS_FTRACE)
else else
obj-y += cachev1.o obj-y += cachev1.o
CFLAGS_REMOVE_cachev1.o = $(CC_FLAGS_FTRACE)
endif endif
obj-y += dma-mapping.o obj-y += dma-mapping.o
...@@ -14,3 +16,4 @@ obj-y += syscache.o ...@@ -14,3 +16,4 @@ obj-y += syscache.o
obj-y += tlb.o obj-y += tlb.o
obj-y += asid.o obj-y += asid.o
obj-y += context.o obj-y += context.o
obj-$(CONFIG_HAVE_TCM) += tcm.o
...@@ -94,6 +94,11 @@ void icache_inv_all(void) ...@@ -94,6 +94,11 @@ void icache_inv_all(void)
cache_op_all(INS_CACHE|CACHE_INV, 0); cache_op_all(INS_CACHE|CACHE_INV, 0);
} }
void local_icache_inv_all(void *priv)
{
cache_op_all(INS_CACHE|CACHE_INV, 0);
}
void dcache_wb_range(unsigned long start, unsigned long end) void dcache_wb_range(unsigned long start, unsigned long end)
{ {
cache_op_range(start, end, DATA_CACHE|CACHE_CLR, 0); cache_op_range(start, end, DATA_CACHE|CACHE_CLR, 0);
......
...@@ -3,15 +3,25 @@ ...@@ -3,15 +3,25 @@
#include <linux/spinlock.h> #include <linux/spinlock.h>
#include <linux/smp.h> #include <linux/smp.h>
#include <linux/mm.h>
#include <asm/cache.h> #include <asm/cache.h>
#include <asm/barrier.h> #include <asm/barrier.h>
inline void dcache_wb_line(unsigned long start) #define INS_CACHE (1 << 0)
#define CACHE_INV (1 << 4)
void local_icache_inv_all(void *priv)
{ {
asm volatile("dcache.cval1 %0\n"::"r"(start):"memory"); mtcr("cr17", INS_CACHE|CACHE_INV);
sync_is(); sync_is();
} }
void icache_inv_all(void)
{
on_each_cpu(local_icache_inv_all, NULL, 1);
}
#ifdef CONFIG_CPU_HAS_ICACHE_INS
void icache_inv_range(unsigned long start, unsigned long end) void icache_inv_range(unsigned long start, unsigned long end)
{ {
unsigned long i = start & ~(L1_CACHE_BYTES - 1); unsigned long i = start & ~(L1_CACHE_BYTES - 1);
...@@ -20,43 +30,32 @@ void icache_inv_range(unsigned long start, unsigned long end) ...@@ -20,43 +30,32 @@ void icache_inv_range(unsigned long start, unsigned long end)
asm volatile("icache.iva %0\n"::"r"(i):"memory"); asm volatile("icache.iva %0\n"::"r"(i):"memory");
sync_is(); sync_is();
} }
#else
void icache_inv_all(void) void icache_inv_range(unsigned long start, unsigned long end)
{ {
asm volatile("icache.ialls\n":::"memory"); icache_inv_all();
sync_is();
} }
#endif
void dcache_wb_range(unsigned long start, unsigned long end) inline void dcache_wb_line(unsigned long start)
{ {
unsigned long i = start & ~(L1_CACHE_BYTES - 1); asm volatile("dcache.cval1 %0\n"::"r"(start):"memory");
for (; i < end; i += L1_CACHE_BYTES)
asm volatile("dcache.cval1 %0\n"::"r"(i):"memory");
sync_is(); sync_is();
} }
void dcache_inv_range(unsigned long start, unsigned long end) void dcache_wb_range(unsigned long start, unsigned long end)
{ {
unsigned long i = start & ~(L1_CACHE_BYTES - 1); unsigned long i = start & ~(L1_CACHE_BYTES - 1);
for (; i < end; i += L1_CACHE_BYTES) for (; i < end; i += L1_CACHE_BYTES)
asm volatile("dcache.civa %0\n"::"r"(i):"memory"); asm volatile("dcache.cval1 %0\n"::"r"(i):"memory");
sync_is(); sync_is();
} }
void cache_wbinv_range(unsigned long start, unsigned long end) void cache_wbinv_range(unsigned long start, unsigned long end)
{ {
unsigned long i = start & ~(L1_CACHE_BYTES - 1); dcache_wb_range(start, end);
icache_inv_range(start, end);
for (; i < end; i += L1_CACHE_BYTES)
asm volatile("dcache.cval1 %0\n"::"r"(i):"memory");
sync_is();
i = start & ~(L1_CACHE_BYTES - 1);
for (; i < end; i += L1_CACHE_BYTES)
asm volatile("icache.iva %0\n"::"r"(i):"memory");
sync_is();
} }
EXPORT_SYMBOL(cache_wbinv_range); EXPORT_SYMBOL(cache_wbinv_range);
......
...@@ -117,85 +117,29 @@ struct page *kmap_atomic_to_page(void *ptr) ...@@ -117,85 +117,29 @@ struct page *kmap_atomic_to_page(void *ptr)
return pte_page(*pte); return pte_page(*pte);
} }
static void __init fixrange_init(unsigned long start, unsigned long end, static void __init kmap_pages_init(void)
pgd_t *pgd_base)
{ {
#ifdef CONFIG_HIGHMEM
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
int i, j, k;
unsigned long vaddr; unsigned long vaddr;
vaddr = start;
i = __pgd_offset(vaddr);
j = __pud_offset(vaddr);
k = __pmd_offset(vaddr);
pgd = pgd_base + i;
for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
pud = (pud_t *)pgd;
for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
pmd = (pmd_t *)pud;
for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
if (pmd_none(*pmd)) {
pte = (pte_t *) memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
if (!pte)
panic("%s: Failed to allocate %lu bytes align=%lx\n",
__func__, PAGE_SIZE,
PAGE_SIZE);
set_pmd(pmd, __pmd(__pa(pte)));
BUG_ON(pte != pte_offset_kernel(pmd, 0));
}
vaddr += PMD_SIZE;
}
k = 0;
}
j = 0;
}
#endif
}
void __init fixaddr_kmap_pages_init(void)
{
unsigned long vaddr;
pgd_t *pgd_base;
#ifdef CONFIG_HIGHMEM
pgd_t *pgd; pgd_t *pgd;
pmd_t *pmd; pmd_t *pmd;
pud_t *pud; pud_t *pud;
pte_t *pte; pte_t *pte;
#endif
pgd_base = swapper_pg_dir;
/*
* Fixed mappings:
*/
vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
fixrange_init(vaddr, 0, pgd_base);
#ifdef CONFIG_HIGHMEM
/*
* Permanent kmaps:
*/
vaddr = PKMAP_BASE; vaddr = PKMAP_BASE;
fixrange_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base); fixrange_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, swapper_pg_dir);
pgd = swapper_pg_dir + __pgd_offset(vaddr); pgd = swapper_pg_dir + __pgd_offset(vaddr);
pud = (pud_t *)pgd; pud = (pud_t *)pgd;
pmd = pmd_offset(pud, vaddr); pmd = pmd_offset(pud, vaddr);
pte = pte_offset_kernel(pmd, vaddr); pte = pte_offset_kernel(pmd, vaddr);
pkmap_page_table = pte; pkmap_page_table = pte;
#endif
} }
void __init kmap_init(void) void __init kmap_init(void)
{ {
unsigned long vaddr; unsigned long vaddr;
fixaddr_kmap_pages_init(); kmap_pages_init();
vaddr = __fix_to_virt(FIX_KMAP_BEGIN); vaddr = __fix_to_virt(FIX_KMAP_BEGIN);
......
...@@ -19,6 +19,7 @@ ...@@ -19,6 +19,7 @@
#include <linux/swap.h> #include <linux/swap.h>
#include <linux/proc_fs.h> #include <linux/proc_fs.h>
#include <linux/pfn.h> #include <linux/pfn.h>
#include <linux/initrd.h>
#include <asm/setup.h> #include <asm/setup.h>
#include <asm/cachectl.h> #include <asm/cachectl.h>
...@@ -31,10 +32,50 @@ ...@@ -31,10 +32,50 @@
pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss; pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss; pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss;
EXPORT_SYMBOL(invalid_pte_table);
unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
__page_aligned_bss; __page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page); EXPORT_SYMBOL(empty_zero_page);
#ifdef CONFIG_BLK_DEV_INITRD
static void __init setup_initrd(void)
{
unsigned long size;
if (initrd_start >= initrd_end) {
pr_err("initrd not found or empty");
goto disable;
}
if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) {
pr_err("initrd extends beyond end of memory");
goto disable;
}
size = initrd_end - initrd_start;
if (memblock_is_region_reserved(__pa(initrd_start), size)) {
pr_err("INITRD: 0x%08lx+0x%08lx overlaps in-use memory region",
__pa(initrd_start), size);
goto disable;
}
memblock_reserve(__pa(initrd_start), size);
pr_info("Initial ramdisk at: 0x%p (%lu bytes)\n",
(void *)(initrd_start), size);
initrd_below_start_ok = 1;
return;
disable:
initrd_start = initrd_end = 0;
pr_err(" - disabling initrd\n");
}
#endif
void __init mem_init(void) void __init mem_init(void)
{ {
#ifdef CONFIG_HIGHMEM #ifdef CONFIG_HIGHMEM
...@@ -46,6 +87,10 @@ void __init mem_init(void) ...@@ -46,6 +87,10 @@ void __init mem_init(void)
#endif #endif
high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT); high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
#ifdef CONFIG_BLK_DEV_INITRD
setup_initrd();
#endif
memblock_free_all(); memblock_free_all();
#ifdef CONFIG_HIGHMEM #ifdef CONFIG_HIGHMEM
...@@ -101,3 +146,50 @@ void __init pre_mmu_init(void) ...@@ -101,3 +146,50 @@ void __init pre_mmu_init(void)
/* Setup page mask to 4k */ /* Setup page mask to 4k */
write_mmu_pagemask(0); write_mmu_pagemask(0);
} }
void __init fixrange_init(unsigned long start, unsigned long end,
pgd_t *pgd_base)
{
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
int i, j, k;
unsigned long vaddr;
vaddr = start;
i = __pgd_offset(vaddr);
j = __pud_offset(vaddr);
k = __pmd_offset(vaddr);
pgd = pgd_base + i;
for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
pud = (pud_t *)pgd;
for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
pmd = (pmd_t *)pud;
for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
if (pmd_none(*pmd)) {
pte = (pte_t *) memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
if (!pte)
panic("%s: Failed to allocate %lu bytes align=%lx\n",
__func__, PAGE_SIZE,
PAGE_SIZE);
set_pmd(pmd, __pmd(__pa(pte)));
BUG_ON(pte != pte_offset_kernel(pmd, 0));
}
vaddr += PMD_SIZE;
}
k = 0;
}
j = 0;
}
}
void __init fixaddr_init(void)
{
unsigned long vaddr;
vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
fixrange_init(vaddr, vaddr + PMD_SIZE, swapper_pg_dir);
}
...@@ -3,7 +3,7 @@ ...@@ -3,7 +3,7 @@
#include <linux/syscalls.h> #include <linux/syscalls.h>
#include <asm/page.h> #include <asm/page.h>
#include <asm/cache.h> #include <asm/cacheflush.h>
#include <asm/cachectl.h> #include <asm/cachectl.h>
SYSCALL_DEFINE3(cacheflush, SYSCALL_DEFINE3(cacheflush,
...@@ -13,17 +13,14 @@ SYSCALL_DEFINE3(cacheflush, ...@@ -13,17 +13,14 @@ SYSCALL_DEFINE3(cacheflush,
{ {
switch (cache) { switch (cache) {
case ICACHE: case ICACHE:
icache_inv_range((unsigned long)addr, case BCACHE:
(unsigned long)addr + bytes); flush_icache_mm_range(current->mm,
break; (unsigned long)addr,
(unsigned long)addr + bytes);
case DCACHE: case DCACHE:
dcache_wb_range((unsigned long)addr, dcache_wb_range((unsigned long)addr,
(unsigned long)addr + bytes); (unsigned long)addr + bytes);
break; break;
case BCACHE:
cache_wbinv_range((unsigned long)addr,
(unsigned long)addr + bytes);
break;
default: default:
return -EINVAL; return -EINVAL;
} }
......
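The path above is what userspace hits when it asks the kernel to synchronize caches after emitting code, e.g. from a JIT. A hedged sketch, assuming the csky uapi exposes the syscall as __NR_cacheflush and the ICACHE/DCACHE/BCACHE selectors via <asm/cachectl.h>, as other architectures with this syscall do:

#include <stddef.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/cachectl.h>      /* ICACHE, DCACHE, BCACHE */

/* Write back the D-cache and invalidate the I-cache over a code buffer. */
static int sync_code_buffer(void *addr, size_t bytes)
{
        return syscall(__NR_cacheflush, addr, bytes, BCACHE);
}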
// SPDX-License-Identifier: GPL-2.0
#include <linux/highmem.h>
#include <linux/genalloc.h>
#include <asm/tlbflush.h>
#include <asm/fixmap.h>
#if (CONFIG_ITCM_RAM_BASE == 0xffffffff)
#error "You should define ITCM_RAM_BASE"
#endif
#ifdef CONFIG_HAVE_DTCM
#if (CONFIG_DTCM_RAM_BASE == 0xffffffff)
#error "You should define DTCM_RAM_BASE"
#endif
#if (CONFIG_DTCM_RAM_BASE == CONFIG_ITCM_RAM_BASE)
#error "You should define correct DTCM_RAM_BASE"
#endif
#endif
extern char __tcm_start, __tcm_end, __dtcm_start;
static struct gen_pool *tcm_pool;
static void __init tcm_mapping_init(void)
{
pte_t *tcm_pte;
unsigned long vaddr, paddr;
int i;
paddr = CONFIG_ITCM_RAM_BASE;
if (pfn_valid(PFN_DOWN(CONFIG_ITCM_RAM_BASE)))
goto panic;
#ifndef CONFIG_HAVE_DTCM
for (i = 0; i < TCM_NR_PAGES; i++) {
#else
for (i = 0; i < CONFIG_ITCM_NR_PAGES; i++) {
#endif
vaddr = __fix_to_virt(FIX_TCM - i);
tcm_pte =
pte_offset_kernel((pmd_t *)pgd_offset_k(vaddr), vaddr);
set_pte(tcm_pte, pfn_pte(__phys_to_pfn(paddr), PAGE_KERNEL));
flush_tlb_one(vaddr);
paddr = paddr + PAGE_SIZE;
}
#ifdef CONFIG_HAVE_DTCM
if (pfn_valid(PFN_DOWN(CONFIG_DTCM_RAM_BASE)))
goto panic;
paddr = CONFIG_DTCM_RAM_BASE;
for (i = 0; i < CONFIG_DTCM_NR_PAGES; i++) {
vaddr = __fix_to_virt(FIX_TCM - CONFIG_ITCM_NR_PAGES - i);
tcm_pte =
pte_offset_kernel((pmd_t *) pgd_offset_k(vaddr), vaddr);
set_pte(tcm_pte, pfn_pte(__phys_to_pfn(paddr), PAGE_KERNEL));
flush_tlb_one(vaddr);
paddr = paddr + PAGE_SIZE;
}
#endif
#ifndef CONFIG_HAVE_DTCM
memcpy((void *)__fix_to_virt(FIX_TCM),
&__tcm_start, &__tcm_end - &__tcm_start);
pr_info("%s: mapping tcm va:0x%08lx to pa:0x%08x\n",
__func__, __fix_to_virt(FIX_TCM), CONFIG_ITCM_RAM_BASE);
pr_info("%s: __tcm_start va:0x%08lx size:%d\n",
__func__, (unsigned long)&__tcm_start, &__tcm_end - &__tcm_start);
#else
memcpy((void *)__fix_to_virt(FIX_TCM),
&__tcm_start, &__dtcm_start - &__tcm_start);
pr_info("%s: mapping itcm va:0x%08lx to pa:0x%08x\n",
__func__, __fix_to_virt(FIX_TCM), CONFIG_ITCM_RAM_BASE);
pr_info("%s: __itcm_start va:0x%08lx size:%d\n",
__func__, (unsigned long)&__tcm_start, &__dtcm_start - &__tcm_start);
memcpy((void *)__fix_to_virt(FIX_TCM - CONFIG_ITCM_NR_PAGES),
&__dtcm_start, &__tcm_end - &__dtcm_start);
pr_info("%s: mapping dtcm va:0x%08lx to pa:0x%08x\n",
__func__, __fix_to_virt(FIX_TCM - CONFIG_ITCM_NR_PAGES),
CONFIG_DTCM_RAM_BASE);
pr_info("%s: __dtcm_start va:0x%08lx size:%d\n",
__func__, (unsigned long)&__dtcm_start, &__tcm_end - &__dtcm_start);
#endif
return;
panic:
panic("TCM init error");
}
void *tcm_alloc(size_t len)
{
unsigned long vaddr;
if (!tcm_pool)
return NULL;
vaddr = gen_pool_alloc(tcm_pool, len);
if (!vaddr)
return NULL;
return (void *) vaddr;
}
EXPORT_SYMBOL(tcm_alloc);
void tcm_free(void *addr, size_t len)
{
gen_pool_free(tcm_pool, (unsigned long) addr, len);
}
EXPORT_SYMBOL(tcm_free);
static int __init tcm_setup_pool(void)
{
#ifndef CONFIG_HAVE_DTCM
u32 pool_size = (u32) (TCM_NR_PAGES * PAGE_SIZE)
- (u32) (&__tcm_end - &__tcm_start);
u32 tcm_pool_start = __fix_to_virt(FIX_TCM)
+ (u32) (&__tcm_end - &__tcm_start);
#else
u32 pool_size = (u32) (CONFIG_DTCM_NR_PAGES * PAGE_SIZE)
- (u32) (&__tcm_end - &__dtcm_start);
u32 tcm_pool_start = __fix_to_virt(FIX_TCM - CONFIG_ITCM_NR_PAGES)
+ (u32) (&__tcm_end - &__dtcm_start);
#endif
int ret;
tcm_pool = gen_pool_create(2, -1);
ret = gen_pool_add(tcm_pool, tcm_pool_start, pool_size, -1);
if (ret) {
pr_err("%s: gen_pool add failed!\n", __func__);
return ret;
}
pr_info("%s: Added %d bytes @ 0x%08x to memory pool\n",
__func__, pool_size, tcm_pool_start);
return 0;
}
static int __init tcm_init(void)
{
tcm_mapping_init();
tcm_setup_pool();
return 0;
}
arch_initcall(tcm_init);