Commit 3e859477 authored by Linus Torvalds

Merge tag 'xtensa-20190715' of git://github.com/jcmvbkbc/linux-xtensa

Pull Xtensa updates from Max Filippov:

 - clean up PCI support code

 - add defconfig and DTS for the 'virt' board

 - abstract 'entry' and 'retw' uses in xtensa assembly in preparation
   for XEA3/NX pipeline support

 - random small cleanups

* tag 'xtensa-20190715' of git://github.com/jcmvbkbc/linux-xtensa:
  xtensa: virt: add defconfig and DTS
  xtensa: abstract 'entry' and 'retw' in assembly code
  xtensa: One function call less in bootmem_init()
  xtensa: remove arch/xtensa/include/asm/types.h
  xtensa: use generic pcibios_set_master and pcibios_enable_device
  xtensa: drop dead PCI support code
  xtensa/PCI: Remove unused variable
parents 1ec4013b 775f1f7e
// SPDX-License-Identifier: GPL-2.0
/*
 * Device tree for the xtensa 'virt' board.
 * Describes a single cdns,xtensa-cpu, 2 GiB of RAM at physical address 0,
 * a 40 MHz fixed-rate clock, the cdns,xtensa-pic interrupt controller and
 * a generic ECAM PCI host bridge.
 */
/dts-v1/;
/ {
compatible = "cdns,xtensa-iss";
#address-cells = <1>;
#size-cells = <1>;
interrupt-parent = <&pic>;
chosen {
bootargs = "console=ttyS0,115200n8 debug";
};
/* 2 GiB (0x80000000 bytes) of RAM starting at physical address 0 */
memory@0 {
device_type = "memory";
reg = <0x00000000 0x80000000>;
};
cpus {
#address-cells = <1>;
#size-cells = <0>;
cpu@0 {
compatible = "cdns,xtensa-cpu";
reg = <0>;
clocks = <&osc>;
};
};
clocks {
/* 40 MHz fixed-rate oscillator; referenced by cpu@0 via <&osc> */
osc: osc {
#clock-cells = <0>;
compatible = "fixed-clock";
clock-frequency = <40000000>;
};
};
pic: pic {
compatible = "cdns,xtensa-pic";
/* one cell: internal irq number,
* two cells: second cell == 0: internal irq number
* second cell == 1: external irq number
*/
#address-cells = <0>;
#interrupt-cells = <2>;
interrupt-controller;
};
/* Generic ECAM PCI host bridge: config space at 0xc0000000 (64 MiB),
 * I/O window at 0xc4000000, 32-bit memory window at 0xc8000000,
 * both mapped 1:1 (bus address == CPU physical address).
 */
pci {
compatible = "pci-host-ecam-generic";
device_type = "pci";
#address-cells = <3>;
#size-cells = <2>;
#interrupt-cells = <0x1>;
bus-range = <0x0 0x3f>;
reg = <0xc0000000 0x04000000>;
// BUS_ADDRESS(3) CPU_PHYSICAL(1) SIZE(2)
ranges = <0x01000000 0x0 0xc4000000 0xc4000000 0x0 0x04000000>,
<0x02000000 0x0 0xc8000000 0xc8000000 0x0 0x18000000>;
// PCI_DEVICE(3) INT#(1) CONTROLLER(PHANDLE) CONTROLLER_DATA(2)
/* INTA (0x1) of PCI devices 0-3 (device number encoded in bits 11+
 * of the first cell, masked by 0x1800 below) routes to external
 * irqs 0-3 of the pic (second pic cell == 1 selects external).
 */
interrupt-map = <
0x0000 0x0 0x0 0x1 &pic 0x0 0x1
0x0800 0x0 0x0 0x1 &pic 0x1 0x1
0x1000 0x0 0x0 0x1 &pic 0x2 0x1
0x1800 0x0 0x0 0x1 &pic 0x3 0x1
>;
interrupt-map-mask = <0x1800 0x0 0x0 0x7>;
};
};
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_NO_HZ_IDLE=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_PREEMPT=y
CONFIG_IRQ_TIME_ACCOUNTING=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_MEMCG=y
CONFIG_CGROUP_FREEZER=y
CONFIG_CGROUP_DEVICE=y
CONFIG_CGROUP_CPUACCT=y
CONFIG_CGROUP_DEBUG=y
CONFIG_NAMESPACES=y
CONFIG_SCHED_AUTOGROUP=y
CONFIG_RELAY=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y
CONFIG_SYSCTL_SYSCALL=y
CONFIG_KALLSYMS_ALL=y
CONFIG_PERF_EVENTS=y
CONFIG_XTENSA_VARIANT_DC233C=y
CONFIG_XTENSA_UNALIGNED_USER=y
CONFIG_VECTORS_OFFSET=0x00002000
CONFIG_XTENSA_KSEG_512M=y
CONFIG_HIGHMEM=y
CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE="console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug memmap=0x80000000@0"
CONFIG_USE_OF=y
CONFIG_BUILTIN_DTB_SOURCE="virt"
# CONFIG_PARSE_BOOTPARAM is not set
CONFIG_JUMP_LABEL=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
# CONFIG_COMPACTION is not set
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
CONFIG_INET=y
CONFIG_IP_MULTICAST=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_IP_PNP_RARP=y
# CONFIG_IPV6 is not set
CONFIG_NETFILTER=y
# CONFIG_WIRELESS is not set
CONFIG_PCI=y
CONFIG_PCI_HOST_GENERIC=y
CONFIG_UEVENT_HELPER=y
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
# CONFIG_STANDALONE is not set
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_VIRTIO_BLK=y
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_NETDEVICES=y
CONFIG_VIRTIO_NET=y
# CONFIG_ETHERNET is not set
# CONFIG_WLAN is not set
CONFIG_INPUT_MOUSEDEV=y
CONFIG_INPUT_EVDEV=y
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_LEGACY_PTYS is not set
CONFIG_HW_RANDOM=y
CONFIG_HW_RANDOM_VIRTIO=y
# CONFIG_HWMON is not set
CONFIG_DRM=y
CONFIG_DRM_VGEM=y
CONFIG_DRM_VIRTIO_GPU=y
CONFIG_FB_MODE_HELPERS=y
# CONFIG_VGA_CONSOLE is not set
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_LOGO=y
# CONFIG_USB_SUPPORT is not set
CONFIG_VIRTIO_PCI=y
CONFIG_VIRTIO_INPUT=y
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_EXT3_FS=y
CONFIG_FANOTIFY=y
CONFIG_VFAT_FS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
CONFIG_NFS_FS=y
CONFIG_NFS_V4=y
CONFIG_NFS_SWAP=y
CONFIG_ROOT_NFS=y
CONFIG_SUNRPC_DEBUG=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
CONFIG_CRYPTO_ECHAINIV=y
CONFIG_CRYPTO_DEFLATE=y
CONFIG_CRYPTO_LZO=y
CONFIG_CRYPTO_ANSI_CPRNG=y
CONFIG_CRYPTO_DEV_VIRTIO=y
CONFIG_FONTS=y
CONFIG_PRINTK_TIME=y
CONFIG_DYNAMIC_DEBUG=y
CONFIG_DEBUG_INFO=y
CONFIG_MAGIC_SYSRQ=y
# CONFIG_SCHED_DEBUG is not set
CONFIG_SCHEDSTATS=y
CONFIG_DEBUG_RT_MUTEXES=y
CONFIG_DEBUG_SPINLOCK=y
CONFIG_DEBUG_MUTEXES=y
CONFIG_DEBUG_ATOMIC_SLEEP=y
CONFIG_STACKTRACE=y
CONFIG_RCU_TRACE=y
# CONFIG_FTRACE is not set
# CONFIG_S32C1I_SELFTEST is not set
...@@ -191,4 +191,50 @@ ...@@ -191,4 +191,50 @@
#endif #endif
.endm .endm
/*
 * ABI abstraction for function prologue/epilogue in xtensa assembly.
 * abi_entry(frame_size)/abi_ret(frame_size) expand to the correct
 * entry/exit sequence for the ABI the toolchain targets, so the same
 * .S sources build for both the windowed and the call0 ABI.
 * The *_default variants are the frame_size == 0 shorthand.
 */
#define XTENSA_STACK_ALIGNMENT 16
#if defined(__XTENSA_WINDOWED_ABI__)
/* extra space 'entry' reserves on top of the requested frame */
#define XTENSA_FRAME_SIZE_RESERVE 16
#define XTENSA_SPILL_STACK_RESERVE 32
/*
 * Windowed ABI: 'entry' allocates the frame. The requested size is
 * rounded up to XTENSA_STACK_ALIGNMENT and the fixed reserve is added.
 */
#define abi_entry(frame_size) \
entry sp, (XTENSA_FRAME_SIZE_RESERVE + \
(((frame_size) + XTENSA_STACK_ALIGNMENT - 1) & \
-XTENSA_STACK_ALIGNMENT))
#define abi_entry_default abi_entry(0)
/* 'retw' unwinds the register window; no explicit sp restore needed */
#define abi_ret(frame_size) retw
#define abi_ret_default retw
#elif defined(__XTENSA_CALL0_ABI__)
#define XTENSA_SPILL_STACK_RESERVE 0
/*
 * Call0 ABI: allocate the alignment-rounded frame by moving sp
 * explicitly; the .ifgt guard skips the addi when frame_size == 0.
 */
#define abi_entry(frame_size) __abi_entry (frame_size)
.macro __abi_entry frame_size
.ifgt \frame_size
addi sp, sp, -(((\frame_size) + XTENSA_STACK_ALIGNMENT - 1) & \
-XTENSA_STACK_ALIGNMENT)
.endif
.endm
#define abi_entry_default
/* mirror of __abi_entry: release the same rounded frame, then 'ret' */
#define abi_ret(frame_size) __abi_ret (frame_size)
.macro __abi_ret frame_size
.ifgt \frame_size
addi sp, sp, (((\frame_size) + XTENSA_STACK_ALIGNMENT - 1) & \
-XTENSA_STACK_ALIGNMENT)
.endif
ret
.endm
#define abi_ret_default ret
#else
#error Unsupported Xtensa ABI
#endif
#endif /* _XTENSA_ASMMACRO_H */ #endif /* _XTENSA_ASMMACRO_H */
...@@ -54,16 +54,6 @@ extern void platform_idle (void); ...@@ -54,16 +54,6 @@ extern void platform_idle (void);
*/ */
extern void platform_heartbeat (void); extern void platform_heartbeat (void);
/*
* platform_pcibios_init is called to allow the platform to setup the pci bus.
*/
extern void platform_pcibios_init (void);
/*
* platform_pcibios_fixup allows to modify the PCI configuration.
*/
extern int platform_pcibios_fixup (void);
/* /*
* platform_calibrate_ccount calibrates cpu clock freq (CONFIG_XTENSA_CALIBRATE) * platform_calibrate_ccount calibrates cpu clock freq (CONFIG_XTENSA_CALIBRATE)
*/ */
......
/*
* include/asm-xtensa/types.h
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*/
#ifndef _XTENSA_TYPES_H
#define _XTENSA_TYPES_H
#include <uapi/asm/types.h>
#ifndef __ASSEMBLY__
/*
* These aren't exported outside the kernel to avoid name space clashes
*/
#define BITS_PER_LONG 32
#endif
#endif /* _XTENSA_TYPES_H */
...@@ -121,7 +121,9 @@ ...@@ -121,7 +121,9 @@
ENTRY(coprocessor_flush) ENTRY(coprocessor_flush)
entry a1, 32 /* reserve 4 bytes on stack to save a0 */
abi_entry(4)
s32i a0, a1, 0 s32i a0, a1, 0
movi a0, .Lsave_cp_regs_jump_table movi a0, .Lsave_cp_regs_jump_table
addx8 a3, a3, a0 addx8 a3, a3, a0
...@@ -131,7 +133,8 @@ ENTRY(coprocessor_flush) ...@@ -131,7 +133,8 @@ ENTRY(coprocessor_flush)
beqz a3, 1f beqz a3, 1f
callx0 a3 callx0 a3
1: l32i a0, a1, 0 1: l32i a0, a1, 0
retw
abi_ret(4)
ENDPROC(coprocessor_flush) ENDPROC(coprocessor_flush)
......
...@@ -1842,7 +1842,8 @@ ENDPROC(fast_store_prohibited) ...@@ -1842,7 +1842,8 @@ ENDPROC(fast_store_prohibited)
ENTRY(system_call) ENTRY(system_call)
entry a1, 32 /* reserve 4 bytes on stack for function parameter */
abi_entry(4)
/* regs->syscall = regs->areg[2] */ /* regs->syscall = regs->areg[2] */
...@@ -1892,7 +1893,7 @@ ENTRY(system_call) ...@@ -1892,7 +1893,7 @@ ENTRY(system_call)
s32i a6, a2, PT_AREG2 s32i a6, a2, PT_AREG2
bnez a3, 1f bnez a3, 1f
retw abi_ret(4)
1: 1:
l32i a4, a1, 4 l32i a4, a1, 4
...@@ -1901,7 +1902,7 @@ ENTRY(system_call) ...@@ -1901,7 +1902,7 @@ ENTRY(system_call)
mov a6, a2 mov a6, a2
call4 do_syscall_trace_leave call4 do_syscall_trace_leave
s32i a3, a2, PT_SYSCALL s32i a3, a2, PT_SYSCALL
retw abi_ret(4)
ENDPROC(system_call) ENDPROC(system_call)
...@@ -1952,7 +1953,7 @@ ENDPROC(system_call) ...@@ -1952,7 +1953,7 @@ ENDPROC(system_call)
ENTRY(_switch_to) ENTRY(_switch_to)
entry a1, 48 abi_entry(XTENSA_SPILL_STACK_RESERVE)
mov a11, a3 # and 'next' (a3) mov a11, a3 # and 'next' (a3)
...@@ -2013,7 +2014,7 @@ ENTRY(_switch_to) ...@@ -2013,7 +2014,7 @@ ENTRY(_switch_to)
wsr a14, ps wsr a14, ps
rsync rsync
retw abi_ret(XTENSA_SPILL_STACK_RESERVE)
ENDPROC(_switch_to) ENDPROC(_switch_to)
......
...@@ -11,6 +11,7 @@ ...@@ -11,6 +11,7 @@
*/ */
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/asmmacro.h>
#include <asm/ftrace.h> #include <asm/ftrace.h>
/* /*
...@@ -21,13 +22,13 @@ ...@@ -21,13 +22,13 @@
ENTRY(_mcount) ENTRY(_mcount)
entry a1, 16 abi_entry_default
movi a4, ftrace_trace_function movi a4, ftrace_trace_function
l32i a4, a4, 0 l32i a4, a4, 0
movi a3, ftrace_stub movi a3, ftrace_stub
bne a3, a4, 1f bne a3, a4, 1f
retw abi_ret_default
1: xor a7, a2, a1 1: xor a7, a2, a1
movi a3, 0x3fffffff movi a3, 0x3fffffff
...@@ -40,11 +41,11 @@ ENTRY(_mcount) ...@@ -40,11 +41,11 @@ ENTRY(_mcount)
addi a6, a6, -MCOUNT_INSN_SIZE addi a6, a6, -MCOUNT_INSN_SIZE
callx4 a4 callx4 a4
retw abi_ret_default
ENDPROC(_mcount) ENDPROC(_mcount)
ENTRY(ftrace_stub) ENTRY(ftrace_stub)
entry a1, 16 abi_entry_default
retw abi_ret_default
ENDPROC(ftrace_stub) ENDPROC(ftrace_stub)
...@@ -24,23 +24,6 @@ ...@@ -24,23 +24,6 @@
#include <asm/pci-bridge.h> #include <asm/pci-bridge.h>
#include <asm/platform.h> #include <asm/platform.h>
/* PCI Controller */
/*
* pcibios_alloc_controller
* pcibios_enable_device
* pcibios_fixups
* pcibios_align_resource
* pcibios_fixup_bus
* pci_bus_add_device
*/
static struct pci_controller *pci_ctrl_head;
static struct pci_controller **pci_ctrl_tail = &pci_ctrl_head;
static int pci_bus_count;
/* /*
* We need to avoid collisions with `mirrored' VGA ports * We need to avoid collisions with `mirrored' VGA ports
* and other strange ISA hardware, so we always want the * and other strange ISA hardware, so we always want the
...@@ -75,81 +58,6 @@ pcibios_align_resource(void *data, const struct resource *res, ...@@ -75,81 +58,6 @@ pcibios_align_resource(void *data, const struct resource *res,
return start; return start;
} }
static void __init pci_controller_apertures(struct pci_controller *pci_ctrl,
struct list_head *resources)
{
struct resource *res;
unsigned long io_offset;
int i;
io_offset = (unsigned long)pci_ctrl->io_space.base;
res = &pci_ctrl->io_resource;
if (!res->flags) {
if (io_offset)
pr_err("I/O resource not set for host bridge %d\n",
pci_ctrl->index);
res->start = 0;
res->end = IO_SPACE_LIMIT;
res->flags = IORESOURCE_IO;
}
res->start += io_offset;
res->end += io_offset;
pci_add_resource_offset(resources, res, io_offset);
for (i = 0; i < 3; i++) {
res = &pci_ctrl->mem_resources[i];
if (!res->flags) {
if (i > 0)
continue;
pr_err("Memory resource not set for host bridge %d\n",
pci_ctrl->index);
res->start = 0;
res->end = ~0U;
res->flags = IORESOURCE_MEM;
}
pci_add_resource(resources, res);
}
}
static int __init pcibios_init(void)
{
struct pci_controller *pci_ctrl;
struct list_head resources;
struct pci_bus *bus;
int next_busno = 0, ret;
pr_info("PCI: Probing PCI hardware\n");
/* Scan all of the recorded PCI controllers. */
for (pci_ctrl = pci_ctrl_head; pci_ctrl; pci_ctrl = pci_ctrl->next) {
pci_ctrl->last_busno = 0xff;
INIT_LIST_HEAD(&resources);
pci_controller_apertures(pci_ctrl, &resources);
bus = pci_scan_root_bus(NULL, pci_ctrl->first_busno,
pci_ctrl->ops, pci_ctrl, &resources);
if (!bus)
continue;
pci_ctrl->bus = bus;
pci_ctrl->last_busno = bus->busn_res.end;
if (next_busno <= pci_ctrl->last_busno)
next_busno = pci_ctrl->last_busno+1;
}
pci_bus_count = next_busno;
ret = platform_pcibios_fixup();
if (ret)
return ret;
for (pci_ctrl = pci_ctrl_head; pci_ctrl; pci_ctrl = pci_ctrl->next) {
if (pci_ctrl->bus)
pci_bus_add_devices(pci_ctrl->bus);
}
return 0;
}
subsys_initcall(pcibios_init);
void pcibios_fixup_bus(struct pci_bus *bus) void pcibios_fixup_bus(struct pci_bus *bus)
{ {
if (bus->parent) { if (bus->parent) {
...@@ -158,38 +66,6 @@ void pcibios_fixup_bus(struct pci_bus *bus) ...@@ -158,38 +66,6 @@ void pcibios_fixup_bus(struct pci_bus *bus)
} }
} }
void pcibios_set_master(struct pci_dev *dev)
{
/* No special bus mastering setup handling */
}
int pcibios_enable_device(struct pci_dev *dev, int mask)
{
u16 cmd, old_cmd;
int idx;
struct resource *r;
pci_read_config_word(dev, PCI_COMMAND, &cmd);
old_cmd = cmd;
for (idx=0; idx<6; idx++) {
r = &dev->resource[idx];
if (!r->start && r->end) {
pci_err(dev, "can't enable device: resource collisions\n");
return -EINVAL;
}
if (r->flags & IORESOURCE_IO)
cmd |= PCI_COMMAND_IO;
if (r->flags & IORESOURCE_MEM)
cmd |= PCI_COMMAND_MEMORY;
}
if (cmd != old_cmd) {
pci_info(dev, "enabling device (%04x -> %04x)\n", old_cmd, cmd);
pci_write_config_word(dev, PCI_COMMAND, cmd);
}
return 0;
}
/* /*
* Platform support for /proc/bus/pci/X/Y mmap()s. * Platform support for /proc/bus/pci/X/Y mmap()s.
* -- paulus. * -- paulus.
......
...@@ -34,8 +34,6 @@ _F(void, halt, (void), { while(1); }); ...@@ -34,8 +34,6 @@ _F(void, halt, (void), { while(1); });
_F(void, power_off, (void), { while(1); }); _F(void, power_off, (void), { while(1); });
_F(void, idle, (void), { __asm__ __volatile__ ("waiti 0" ::: "memory"); }); _F(void, idle, (void), { __asm__ __volatile__ ("waiti 0" ::: "memory"); });
_F(void, heartbeat, (void), { }); _F(void, heartbeat, (void), { });
_F(int, pcibios_fixup, (void), { return 0; });
_F(void, pcibios_init, (void), { });
#ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT #ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT
_F(void, calibrate_ccount, (void), _F(void, calibrate_ccount, (void),
......
...@@ -405,10 +405,6 @@ void __init setup_arch(char **cmdline_p) ...@@ -405,10 +405,6 @@ void __init setup_arch(char **cmdline_p)
conswitchp = &dummy_con; conswitchp = &dummy_con;
# endif # endif
#endif #endif
#ifdef CONFIG_PCI
platform_pcibios_init();
#endif
} }
static DEFINE_PER_CPU(struct cpu, cpu_data); static DEFINE_PER_CPU(struct cpu, cpu_data);
......
...@@ -43,7 +43,7 @@ ENTRY(csum_partial) ...@@ -43,7 +43,7 @@ ENTRY(csum_partial)
* Experiments with Ethernet and SLIP connections show that buf * Experiments with Ethernet and SLIP connections show that buf
* is aligned on either a 2-byte or 4-byte boundary. * is aligned on either a 2-byte or 4-byte boundary.
*/ */
entry sp, 32 abi_entry_default
extui a5, a2, 0, 2 extui a5, a2, 0, 2
bnez a5, 8f /* branch if 2-byte aligned */ bnez a5, 8f /* branch if 2-byte aligned */
/* Fall-through on common case, 4-byte alignment */ /* Fall-through on common case, 4-byte alignment */
...@@ -107,7 +107,7 @@ ENTRY(csum_partial) ...@@ -107,7 +107,7 @@ ENTRY(csum_partial)
ONES_ADD(a4, a6) ONES_ADD(a4, a6)
7: 7:
mov a2, a4 mov a2, a4
retw abi_ret_default
/* uncommon case, buf is 2-byte aligned */ /* uncommon case, buf is 2-byte aligned */
8: 8:
...@@ -195,7 +195,7 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst, int len, ...@@ -195,7 +195,7 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst, int len,
ENTRY(csum_partial_copy_generic) ENTRY(csum_partial_copy_generic)
entry sp, 32 abi_entry_default
mov a12, a3 mov a12, a3
mov a11, a4 mov a11, a4
or a10, a2, a3 or a10, a2, a3
...@@ -316,7 +316,7 @@ EX(11f) s8i a9, a3, 0 ...@@ -316,7 +316,7 @@ EX(11f) s8i a9, a3, 0
ONES_ADD(a5, a9) ONES_ADD(a5, a9)
8: 8:
mov a2, a5 mov a2, a5
retw abi_ret_default
5: 5:
/* Control branch to here when either src or dst is odd. We /* Control branch to here when either src or dst is odd. We
...@@ -383,12 +383,12 @@ ENDPROC(csum_partial_copy_generic) ...@@ -383,12 +383,12 @@ ENDPROC(csum_partial_copy_generic)
blt a12, a11, .Leloop blt a12, a11, .Leloop
#endif #endif
2: 2:
retw abi_ret_default
11: 11:
movi a2, -EFAULT movi a2, -EFAULT
s32i a2, a7, 0 /* dst_err_ptr */ s32i a2, a7, 0 /* dst_err_ptr */
movi a2, 0 movi a2, 0
retw abi_ret_default
.previous .previous
...@@ -79,7 +79,7 @@ ...@@ -79,7 +79,7 @@
bne a3, a7, .Lnextbyte # continue loop if $a3:src != $a7:src_end bne a3, a7, .Lnextbyte # continue loop if $a3:src != $a7:src_end
#endif /* !XCHAL_HAVE_LOOPS */ #endif /* !XCHAL_HAVE_LOOPS */
.Lbytecopydone: .Lbytecopydone:
retw abi_ret_default
/* /*
* Destination is unaligned * Destination is unaligned
...@@ -112,7 +112,7 @@ ...@@ -112,7 +112,7 @@
ENTRY(__memcpy) ENTRY(__memcpy)
WEAK(memcpy) WEAK(memcpy)
entry sp, 16 # minimal stack frame abi_entry_default
# a2/ dst, a3/ src, a4/ len # a2/ dst, a3/ src, a4/ len
mov a5, a2 # copy dst so that a2 is return value mov a5, a2 # copy dst so that a2 is return value
.Lcommon: .Lcommon:
...@@ -161,7 +161,7 @@ WEAK(memcpy) ...@@ -161,7 +161,7 @@ WEAK(memcpy)
bbsi.l a4, 2, .L3 bbsi.l a4, 2, .L3
bbsi.l a4, 1, .L4 bbsi.l a4, 1, .L4
bbsi.l a4, 0, .L5 bbsi.l a4, 0, .L5
retw abi_ret_default
.L3: .L3:
# copy 4 bytes # copy 4 bytes
l32i a6, a3, 0 l32i a6, a3, 0
...@@ -170,7 +170,7 @@ WEAK(memcpy) ...@@ -170,7 +170,7 @@ WEAK(memcpy)
addi a5, a5, 4 addi a5, a5, 4
bbsi.l a4, 1, .L4 bbsi.l a4, 1, .L4
bbsi.l a4, 0, .L5 bbsi.l a4, 0, .L5
retw abi_ret_default
.L4: .L4:
# copy 2 bytes # copy 2 bytes
l16ui a6, a3, 0 l16ui a6, a3, 0
...@@ -178,12 +178,12 @@ WEAK(memcpy) ...@@ -178,12 +178,12 @@ WEAK(memcpy)
s16i a6, a5, 0 s16i a6, a5, 0
addi a5, a5, 2 addi a5, a5, 2
bbsi.l a4, 0, .L5 bbsi.l a4, 0, .L5
retw abi_ret_default
.L5: .L5:
# copy 1 byte # copy 1 byte
l8ui a6, a3, 0 l8ui a6, a3, 0
s8i a6, a5, 0 s8i a6, a5, 0
retw abi_ret_default
/* /*
* Destination is aligned, Source is unaligned * Destination is aligned, Source is unaligned
...@@ -255,7 +255,7 @@ WEAK(memcpy) ...@@ -255,7 +255,7 @@ WEAK(memcpy)
#endif #endif
bbsi.l a4, 1, .L14 bbsi.l a4, 1, .L14
bbsi.l a4, 0, .L15 bbsi.l a4, 0, .L15
.Ldone: retw .Ldone: abi_ret_default
.L14: .L14:
# copy 2 bytes # copy 2 bytes
l8ui a6, a3, 0 l8ui a6, a3, 0
...@@ -265,12 +265,12 @@ WEAK(memcpy) ...@@ -265,12 +265,12 @@ WEAK(memcpy)
s8i a7, a5, 1 s8i a7, a5, 1
addi a5, a5, 2 addi a5, a5, 2
bbsi.l a4, 0, .L15 bbsi.l a4, 0, .L15
retw abi_ret_default
.L15: .L15:
# copy 1 byte # copy 1 byte
l8ui a6, a3, 0 l8ui a6, a3, 0
s8i a6, a5, 0 s8i a6, a5, 0
retw abi_ret_default
ENDPROC(__memcpy) ENDPROC(__memcpy)
...@@ -280,7 +280,7 @@ ENDPROC(__memcpy) ...@@ -280,7 +280,7 @@ ENDPROC(__memcpy)
ENTRY(bcopy) ENTRY(bcopy)
entry sp, 16 # minimal stack frame abi_entry_default
# a2=src, a3=dst, a4=len # a2=src, a3=dst, a4=len
mov a5, a3 mov a5, a3
mov a3, a2 mov a3, a2
...@@ -346,7 +346,7 @@ ENDPROC(bcopy) ...@@ -346,7 +346,7 @@ ENDPROC(bcopy)
# $a3:src != $a7:src_start # $a3:src != $a7:src_start
#endif /* !XCHAL_HAVE_LOOPS */ #endif /* !XCHAL_HAVE_LOOPS */
.Lbackbytecopydone: .Lbackbytecopydone:
retw abi_ret_default
/* /*
* Destination is unaligned * Destination is unaligned
...@@ -380,7 +380,7 @@ ENDPROC(bcopy) ...@@ -380,7 +380,7 @@ ENDPROC(bcopy)
ENTRY(__memmove) ENTRY(__memmove)
WEAK(memmove) WEAK(memmove)
entry sp, 16 # minimal stack frame abi_entry_default
# a2/ dst, a3/ src, a4/ len # a2/ dst, a3/ src, a4/ len
mov a5, a2 # copy dst so that a2 is return value mov a5, a2 # copy dst so that a2 is return value
.Lmovecommon: .Lmovecommon:
...@@ -435,7 +435,7 @@ WEAK(memmove) ...@@ -435,7 +435,7 @@ WEAK(memmove)
bbsi.l a4, 2, .Lback3 bbsi.l a4, 2, .Lback3
bbsi.l a4, 1, .Lback4 bbsi.l a4, 1, .Lback4
bbsi.l a4, 0, .Lback5 bbsi.l a4, 0, .Lback5
retw abi_ret_default
.Lback3: .Lback3:
# copy 4 bytes # copy 4 bytes
addi a3, a3, -4 addi a3, a3, -4
...@@ -444,7 +444,7 @@ WEAK(memmove) ...@@ -444,7 +444,7 @@ WEAK(memmove)
s32i a6, a5, 0 s32i a6, a5, 0
bbsi.l a4, 1, .Lback4 bbsi.l a4, 1, .Lback4
bbsi.l a4, 0, .Lback5 bbsi.l a4, 0, .Lback5
retw abi_ret_default
.Lback4: .Lback4:
# copy 2 bytes # copy 2 bytes
addi a3, a3, -2 addi a3, a3, -2
...@@ -452,14 +452,14 @@ WEAK(memmove) ...@@ -452,14 +452,14 @@ WEAK(memmove)
addi a5, a5, -2 addi a5, a5, -2
s16i a6, a5, 0 s16i a6, a5, 0
bbsi.l a4, 0, .Lback5 bbsi.l a4, 0, .Lback5
retw abi_ret_default
.Lback5: .Lback5:
# copy 1 byte # copy 1 byte
addi a3, a3, -1 addi a3, a3, -1
l8ui a6, a3, 0 l8ui a6, a3, 0
addi a5, a5, -1 addi a5, a5, -1
s8i a6, a5, 0 s8i a6, a5, 0
retw abi_ret_default
/* /*
* Destination is aligned, Source is unaligned * Destination is aligned, Source is unaligned
...@@ -531,7 +531,7 @@ WEAK(memmove) ...@@ -531,7 +531,7 @@ WEAK(memmove)
bbsi.l a4, 1, .Lback14 bbsi.l a4, 1, .Lback14
bbsi.l a4, 0, .Lback15 bbsi.l a4, 0, .Lback15
.Lbackdone: .Lbackdone:
retw abi_ret_default
.Lback14: .Lback14:
# copy 2 bytes # copy 2 bytes
addi a3, a3, -2 addi a3, a3, -2
...@@ -541,13 +541,13 @@ WEAK(memmove) ...@@ -541,13 +541,13 @@ WEAK(memmove)
s8i a6, a5, 0 s8i a6, a5, 0
s8i a7, a5, 1 s8i a7, a5, 1
bbsi.l a4, 0, .Lback15 bbsi.l a4, 0, .Lback15
retw abi_ret_default
.Lback15: .Lback15:
# copy 1 byte # copy 1 byte
addi a3, a3, -1 addi a3, a3, -1
addi a5, a5, -1 addi a5, a5, -1
l8ui a6, a3, 0 l8ui a6, a3, 0
s8i a6, a5, 0 s8i a6, a5, 0
retw abi_ret_default
ENDPROC(__memmove) ENDPROC(__memmove)
...@@ -34,7 +34,7 @@ ...@@ -34,7 +34,7 @@
ENTRY(__memset) ENTRY(__memset)
WEAK(memset) WEAK(memset)
entry sp, 16 # minimal stack frame abi_entry_default
# a2/ dst, a3/ c, a4/ length # a2/ dst, a3/ c, a4/ length
extui a3, a3, 0, 8 # mask to just 8 bits extui a3, a3, 0, 8 # mask to just 8 bits
slli a7, a3, 8 # duplicate character in all bytes of word slli a7, a3, 8 # duplicate character in all bytes of word
...@@ -48,7 +48,7 @@ WEAK(memset) ...@@ -48,7 +48,7 @@ WEAK(memset)
srli a7, a4, 4 # number of loop iterations with 16B srli a7, a4, 4 # number of loop iterations with 16B
# per iteration # per iteration
bnez a4, .Laligned bnez a4, .Laligned
retw abi_ret_default
/* /*
* Destination is word-aligned. * Destination is word-aligned.
...@@ -95,7 +95,7 @@ EX(10f) s16i a3, a5, 0 ...@@ -95,7 +95,7 @@ EX(10f) s16i a3, a5, 0
EX(10f) s8i a3, a5, 0 EX(10f) s8i a3, a5, 0
.L5: .L5:
.Lret1: .Lret1:
retw abi_ret_default
/* /*
* Destination is unaligned * Destination is unaligned
...@@ -139,7 +139,7 @@ EX(10f) s8i a3, a5, 0 ...@@ -139,7 +139,7 @@ EX(10f) s8i a3, a5, 0
blt a5, a6, .Lbyteloop blt a5, a6, .Lbyteloop
#endif /* !XCHAL_HAVE_LOOPS */ #endif /* !XCHAL_HAVE_LOOPS */
.Lbytesetdone: .Lbytesetdone:
retw abi_ret_default
ENDPROC(__memset) ENDPROC(__memset)
...@@ -150,4 +150,4 @@ ENDPROC(__memset) ...@@ -150,4 +150,4 @@ ENDPROC(__memset)
10: 10:
movi a2, 0 movi a2, 0
retw abi_ret_default
...@@ -50,7 +50,7 @@ ...@@ -50,7 +50,7 @@
.text .text
ENTRY(__strncpy_user) ENTRY(__strncpy_user)
entry sp, 16 # minimal stack frame abi_entry_default
# a2/ dst, a3/ src, a4/ len # a2/ dst, a3/ src, a4/ len
mov a11, a2 # leave dst in return value register mov a11, a2 # leave dst in return value register
beqz a4, .Lret # if len is zero beqz a4, .Lret # if len is zero
...@@ -93,7 +93,7 @@ EX(10f) s8i a9, a11, 0 # store byte 0 ...@@ -93,7 +93,7 @@ EX(10f) s8i a9, a11, 0 # store byte 0
bnez a4, .Lsrcaligned # if len is nonzero bnez a4, .Lsrcaligned # if len is nonzero
.Lret: .Lret:
sub a2, a11, a2 # compute strlen sub a2, a11, a2 # compute strlen
retw abi_ret_default
/* /*
* dst is word-aligned, src is word-aligned * dst is word-aligned, src is word-aligned
...@@ -148,14 +148,14 @@ EX(10f) s8i a9, a11, 0 ...@@ -148,14 +148,14 @@ EX(10f) s8i a9, a11, 0
.Lz3: # byte 3 is zero .Lz3: # byte 3 is zero
addi a11, a11, 3 # advance dst pointer addi a11, a11, 3 # advance dst pointer
sub a2, a11, a2 # compute strlen sub a2, a11, a2 # compute strlen
retw abi_ret_default
.Lz0: # byte 0 is zero .Lz0: # byte 0 is zero
#ifdef __XTENSA_EB__ #ifdef __XTENSA_EB__
movi a9, 0 movi a9, 0
#endif /* __XTENSA_EB__ */ #endif /* __XTENSA_EB__ */
EX(10f) s8i a9, a11, 0 EX(10f) s8i a9, a11, 0
sub a2, a11, a2 # compute strlen sub a2, a11, a2 # compute strlen
retw abi_ret_default
.Lz1: # byte 1 is zero .Lz1: # byte 1 is zero
#ifdef __XTENSA_EB__ #ifdef __XTENSA_EB__
extui a9, a9, 16, 16 extui a9, a9, 16, 16
...@@ -163,7 +163,7 @@ EX(10f) s8i a9, a11, 0 ...@@ -163,7 +163,7 @@ EX(10f) s8i a9, a11, 0
EX(10f) s16i a9, a11, 0 EX(10f) s16i a9, a11, 0
addi a11, a11, 1 # advance dst pointer addi a11, a11, 1 # advance dst pointer
sub a2, a11, a2 # compute strlen sub a2, a11, a2 # compute strlen
retw abi_ret_default
.Lz2: # byte 2 is zero .Lz2: # byte 2 is zero
#ifdef __XTENSA_EB__ #ifdef __XTENSA_EB__
extui a9, a9, 16, 16 extui a9, a9, 16, 16
...@@ -173,7 +173,7 @@ EX(10f) s16i a9, a11, 0 ...@@ -173,7 +173,7 @@ EX(10f) s16i a9, a11, 0
EX(10f) s8i a9, a11, 2 EX(10f) s8i a9, a11, 2
addi a11, a11, 2 # advance dst pointer addi a11, a11, 2 # advance dst pointer
sub a2, a11, a2 # compute strlen sub a2, a11, a2 # compute strlen
retw abi_ret_default
.align 4 # 1 mod 4 alignment for LOOPNEZ .align 4 # 1 mod 4 alignment for LOOPNEZ
.byte 0 # (0 mod 4 alignment for LBEG) .byte 0 # (0 mod 4 alignment for LBEG)
...@@ -199,7 +199,7 @@ EX(10f) s8i a9, a11, 0 ...@@ -199,7 +199,7 @@ EX(10f) s8i a9, a11, 0
.Lunalignedend: .Lunalignedend:
sub a2, a11, a2 # compute strlen sub a2, a11, a2 # compute strlen
retw abi_ret_default
ENDPROC(__strncpy_user) ENDPROC(__strncpy_user)
...@@ -214,4 +214,4 @@ ENDPROC(__strncpy_user) ...@@ -214,4 +214,4 @@ ENDPROC(__strncpy_user)
10: 10:
11: 11:
movi a2, -EFAULT movi a2, -EFAULT
retw abi_ret_default
...@@ -45,7 +45,7 @@ ...@@ -45,7 +45,7 @@
.text .text
ENTRY(__strnlen_user) ENTRY(__strnlen_user)
entry sp, 16 # minimal stack frame abi_entry_default
# a2/ s, a3/ len # a2/ s, a3/ len
addi a4, a2, -4 # because we overincrement at the end; addi a4, a2, -4 # because we overincrement at the end;
# we compensate with load offsets of 4 # we compensate with load offsets of 4
...@@ -96,7 +96,7 @@ EX(10f) l32i a9, a4, 4 # load 4 bytes for remaining checks ...@@ -96,7 +96,7 @@ EX(10f) l32i a9, a4, 4 # load 4 bytes for remaining checks
addi a4, a4, 1 # advance string pointer addi a4, a4, 1 # advance string pointer
.L101: .L101:
sub a2, a4, a2 # compute length sub a2, a4, a2 # compute length
retw abi_ret_default
# NOTE that in several places below, we point to the byte just after # NOTE that in several places below, we point to the byte just after
# the zero byte in order to include the NULL terminator in the count. # the zero byte in order to include the NULL terminator in the count.
...@@ -106,15 +106,15 @@ EX(10f) l32i a9, a4, 4 # load 4 bytes for remaining checks ...@@ -106,15 +106,15 @@ EX(10f) l32i a9, a4, 4 # load 4 bytes for remaining checks
.Lz0: # byte 0 is zero .Lz0: # byte 0 is zero
addi a4, a4, 1 # point just beyond zero byte addi a4, a4, 1 # point just beyond zero byte
sub a2, a4, a2 # subtract to get length sub a2, a4, a2 # subtract to get length
retw abi_ret_default
.Lz1: # byte 1 is zero .Lz1: # byte 1 is zero
addi a4, a4, 1+1 # point just beyond zero byte addi a4, a4, 1+1 # point just beyond zero byte
sub a2, a4, a2 # subtract to get length sub a2, a4, a2 # subtract to get length
retw abi_ret_default
.Lz2: # byte 2 is zero .Lz2: # byte 2 is zero
addi a4, a4, 2+1 # point just beyond zero byte addi a4, a4, 2+1 # point just beyond zero byte
sub a2, a4, a2 # subtract to get length sub a2, a4, a2 # subtract to get length
retw abi_ret_default
.L1mod2: # address is odd .L1mod2: # address is odd
EX(10f) l8ui a9, a4, 4 # get byte 0 EX(10f) l8ui a9, a4, 4 # get byte 0
...@@ -130,7 +130,7 @@ EX(10f) l32i a9, a4, 0 # get word with first two bytes of string ...@@ -130,7 +130,7 @@ EX(10f) l32i a9, a4, 0 # get word with first two bytes of string
# byte 3 is zero # byte 3 is zero
addi a4, a4, 3+1 # point just beyond zero byte addi a4, a4, 3+1 # point just beyond zero byte
sub a2, a4, a2 # subtract to get length sub a2, a4, a2 # subtract to get length
retw abi_ret_default
ENDPROC(__strnlen_user) ENDPROC(__strnlen_user)
...@@ -138,4 +138,4 @@ ENDPROC(__strnlen_user) ...@@ -138,4 +138,4 @@ ENDPROC(__strnlen_user)
.align 4 .align 4
10: 10:
movi a2, 0 movi a2, 0
retw abi_ret_default
...@@ -60,7 +60,7 @@ ...@@ -60,7 +60,7 @@
.text .text
ENTRY(__xtensa_copy_user) ENTRY(__xtensa_copy_user)
entry sp, 16 # minimal stack frame abi_entry_default
# a2/ dst, a3/ src, a4/ len # a2/ dst, a3/ src, a4/ len
mov a5, a2 # copy dst so that a2 is return value mov a5, a2 # copy dst so that a2 is return value
mov a11, a4 # preserve original len for error case mov a11, a4 # preserve original len for error case
...@@ -75,7 +75,7 @@ ENTRY(__xtensa_copy_user) ...@@ -75,7 +75,7 @@ ENTRY(__xtensa_copy_user)
__ssa8 a3 # set shift amount from byte offset __ssa8 a3 # set shift amount from byte offset
bnez a4, .Lsrcunaligned bnez a4, .Lsrcunaligned
movi a2, 0 # return success for len==0 movi a2, 0 # return success for len==0
retw abi_ret_default
/* /*
* Destination is unaligned * Destination is unaligned
...@@ -127,7 +127,7 @@ EX(10f) s8i a6, a5, 0 ...@@ -127,7 +127,7 @@ EX(10f) s8i a6, a5, 0
#endif /* !XCHAL_HAVE_LOOPS */ #endif /* !XCHAL_HAVE_LOOPS */
.Lbytecopydone: .Lbytecopydone:
movi a2, 0 # return success for len bytes copied movi a2, 0 # return success for len bytes copied
retw abi_ret_default
/* /*
* Destination and source are word-aligned. * Destination and source are word-aligned.
...@@ -187,7 +187,7 @@ EX(10f) l8ui a6, a3, 0 ...@@ -187,7 +187,7 @@ EX(10f) l8ui a6, a3, 0
EX(10f) s8i a6, a5, 0 EX(10f) s8i a6, a5, 0
.L5: .L5:
movi a2, 0 # return success for len bytes copied movi a2, 0 # return success for len bytes copied
retw abi_ret_default
/* /*
* Destination is aligned, Source is unaligned * Destination is aligned, Source is unaligned
...@@ -264,7 +264,7 @@ EX(10f) l8ui a6, a3, 0 ...@@ -264,7 +264,7 @@ EX(10f) l8ui a6, a3, 0
EX(10f) s8i a6, a5, 0 EX(10f) s8i a6, a5, 0
.L15: .L15:
movi a2, 0 # return success for len bytes copied movi a2, 0 # return success for len bytes copied
retw abi_ret_default
ENDPROC(__xtensa_copy_user) ENDPROC(__xtensa_copy_user)
...@@ -281,4 +281,4 @@ ENDPROC(__xtensa_copy_user) ...@@ -281,4 +281,4 @@ ENDPROC(__xtensa_copy_user)
10: 10:
sub a2, a5, a2 /* a2 <-- bytes copied */ sub a2, a5, a2 /* a2 <-- bytes copied */
sub a2, a11, a2 /* a2 <-- bytes not copied */ sub a2, a11, a2 /* a2 <-- bytes not copied */
retw abi_ret_default
...@@ -45,10 +45,7 @@ void __init bootmem_init(void) ...@@ -45,10 +45,7 @@ void __init bootmem_init(void)
* If PHYS_OFFSET is zero reserve page at address 0: * If PHYS_OFFSET is zero reserve page at address 0:
* successfull allocations should never return NULL. * successfull allocations should never return NULL.
*/ */
if (PHYS_OFFSET) memblock_reserve(0, PHYS_OFFSET ? PHYS_OFFSET : 1);
memblock_reserve(0, PHYS_OFFSET);
else
memblock_reserve(0, 1);
early_init_fdt_scan_reserved_mem(); early_init_fdt_scan_reserved_mem();
......
...@@ -30,7 +30,7 @@ ...@@ -30,7 +30,7 @@
ENTRY(clear_page) ENTRY(clear_page)
entry a1, 16 abi_entry_default
movi a3, 0 movi a3, 0
__loopi a2, a7, PAGE_SIZE, 32 __loopi a2, a7, PAGE_SIZE, 32
...@@ -44,7 +44,7 @@ ENTRY(clear_page) ...@@ -44,7 +44,7 @@ ENTRY(clear_page)
s32i a3, a2, 28 s32i a3, a2, 28
__endla a2, a7, 32 __endla a2, a7, 32
retw abi_ret_default
ENDPROC(clear_page) ENDPROC(clear_page)
...@@ -57,7 +57,7 @@ ENDPROC(clear_page) ...@@ -57,7 +57,7 @@ ENDPROC(clear_page)
ENTRY(copy_page) ENTRY(copy_page)
entry a1, 16 abi_entry_default
__loopi a2, a4, PAGE_SIZE, 32 __loopi a2, a4, PAGE_SIZE, 32
...@@ -86,7 +86,7 @@ ENTRY(copy_page) ...@@ -86,7 +86,7 @@ ENTRY(copy_page)
__endl a2, a4 __endl a2, a4
retw abi_ret_default
ENDPROC(copy_page) ENDPROC(copy_page)
...@@ -116,7 +116,7 @@ ENTRY(__tlbtemp_mapping_start) ...@@ -116,7 +116,7 @@ ENTRY(__tlbtemp_mapping_start)
ENTRY(clear_page_alias) ENTRY(clear_page_alias)
entry a1, 32 abi_entry_default
/* Skip setting up a temporary DTLB if not aliased low page. */ /* Skip setting up a temporary DTLB if not aliased low page. */
...@@ -144,14 +144,14 @@ ENTRY(clear_page_alias) ...@@ -144,14 +144,14 @@ ENTRY(clear_page_alias)
__endla a2, a7, 32 __endla a2, a7, 32
bnez a6, 1f bnez a6, 1f
retw abi_ret_default
/* We need to invalidate the temporary idtlb entry, if any. */ /* We need to invalidate the temporary idtlb entry, if any. */
1: idtlb a4 1: idtlb a4
dsync dsync
retw abi_ret_default
ENDPROC(clear_page_alias) ENDPROC(clear_page_alias)
...@@ -164,7 +164,7 @@ ENDPROC(clear_page_alias) ...@@ -164,7 +164,7 @@ ENDPROC(clear_page_alias)
ENTRY(copy_page_alias) ENTRY(copy_page_alias)
entry a1, 32 abi_entry_default
/* Skip setting up a temporary DTLB for destination if not aliased. */ /* Skip setting up a temporary DTLB for destination if not aliased. */
...@@ -221,19 +221,19 @@ ENTRY(copy_page_alias) ...@@ -221,19 +221,19 @@ ENTRY(copy_page_alias)
bnez a6, 1f bnez a6, 1f
bnez a7, 2f bnez a7, 2f
retw abi_ret_default
1: addi a2, a2, -PAGE_SIZE 1: addi a2, a2, -PAGE_SIZE
idtlb a2 idtlb a2
dsync dsync
bnez a7, 2f bnez a7, 2f
retw abi_ret_default
2: addi a3, a3, -PAGE_SIZE+1 2: addi a3, a3, -PAGE_SIZE+1
idtlb a3 idtlb a3
dsync dsync
retw abi_ret_default
ENDPROC(copy_page_alias) ENDPROC(copy_page_alias)
...@@ -248,7 +248,7 @@ ENDPROC(copy_page_alias) ...@@ -248,7 +248,7 @@ ENDPROC(copy_page_alias)
ENTRY(__flush_invalidate_dcache_page_alias) ENTRY(__flush_invalidate_dcache_page_alias)
entry sp, 16 abi_entry_default
movi a7, 0 # required for exception handler movi a7, 0 # required for exception handler
addi a6, a3, (PAGE_KERNEL | _PAGE_HW_WRITE) addi a6, a3, (PAGE_KERNEL | _PAGE_HW_WRITE)
...@@ -261,7 +261,7 @@ ENTRY(__flush_invalidate_dcache_page_alias) ...@@ -261,7 +261,7 @@ ENTRY(__flush_invalidate_dcache_page_alias)
idtlb a4 idtlb a4
dsync dsync
retw abi_ret_default
ENDPROC(__flush_invalidate_dcache_page_alias) ENDPROC(__flush_invalidate_dcache_page_alias)
...@@ -272,7 +272,7 @@ ENDPROC(__flush_invalidate_dcache_page_alias) ...@@ -272,7 +272,7 @@ ENDPROC(__flush_invalidate_dcache_page_alias)
ENTRY(__invalidate_dcache_page_alias) ENTRY(__invalidate_dcache_page_alias)
entry sp, 16 abi_entry_default
movi a7, 0 # required for exception handler movi a7, 0 # required for exception handler
addi a6, a3, (PAGE_KERNEL | _PAGE_HW_WRITE) addi a6, a3, (PAGE_KERNEL | _PAGE_HW_WRITE)
...@@ -285,7 +285,7 @@ ENTRY(__invalidate_dcache_page_alias) ...@@ -285,7 +285,7 @@ ENTRY(__invalidate_dcache_page_alias)
idtlb a4 idtlb a4
dsync dsync
retw abi_ret_default
ENDPROC(__invalidate_dcache_page_alias) ENDPROC(__invalidate_dcache_page_alias)
#endif #endif
...@@ -296,7 +296,7 @@ ENTRY(__tlbtemp_mapping_itlb) ...@@ -296,7 +296,7 @@ ENTRY(__tlbtemp_mapping_itlb)
ENTRY(__invalidate_icache_page_alias) ENTRY(__invalidate_icache_page_alias)
entry sp, 16 abi_entry_default
addi a6, a3, (PAGE_KERNEL_EXEC | _PAGE_HW_WRITE) addi a6, a3, (PAGE_KERNEL_EXEC | _PAGE_HW_WRITE)
mov a4, a2 mov a4, a2
...@@ -307,7 +307,7 @@ ENTRY(__invalidate_icache_page_alias) ...@@ -307,7 +307,7 @@ ENTRY(__invalidate_icache_page_alias)
iitlb a4 iitlb a4
isync isync
retw abi_ret_default
ENDPROC(__invalidate_icache_page_alias) ENDPROC(__invalidate_icache_page_alias)
...@@ -325,12 +325,12 @@ ENTRY(__tlbtemp_mapping_end) ...@@ -325,12 +325,12 @@ ENTRY(__tlbtemp_mapping_end)
ENTRY(__invalidate_icache_page) ENTRY(__invalidate_icache_page)
entry sp, 16 abi_entry_default
___invalidate_icache_page a2 a3 ___invalidate_icache_page a2 a3
isync isync
retw abi_ret_default
ENDPROC(__invalidate_icache_page) ENDPROC(__invalidate_icache_page)
...@@ -340,12 +340,12 @@ ENDPROC(__invalidate_icache_page) ...@@ -340,12 +340,12 @@ ENDPROC(__invalidate_icache_page)
ENTRY(__invalidate_dcache_page) ENTRY(__invalidate_dcache_page)
entry sp, 16 abi_entry_default
___invalidate_dcache_page a2 a3 ___invalidate_dcache_page a2 a3
dsync dsync
retw abi_ret_default
ENDPROC(__invalidate_dcache_page) ENDPROC(__invalidate_dcache_page)
...@@ -355,12 +355,12 @@ ENDPROC(__invalidate_dcache_page) ...@@ -355,12 +355,12 @@ ENDPROC(__invalidate_dcache_page)
ENTRY(__flush_invalidate_dcache_page) ENTRY(__flush_invalidate_dcache_page)
entry sp, 16 abi_entry_default
___flush_invalidate_dcache_page a2 a3 ___flush_invalidate_dcache_page a2 a3
dsync dsync
retw abi_ret_default
ENDPROC(__flush_invalidate_dcache_page) ENDPROC(__flush_invalidate_dcache_page)
...@@ -370,12 +370,12 @@ ENDPROC(__flush_invalidate_dcache_page) ...@@ -370,12 +370,12 @@ ENDPROC(__flush_invalidate_dcache_page)
ENTRY(__flush_dcache_page) ENTRY(__flush_dcache_page)
entry sp, 16 abi_entry_default
___flush_dcache_page a2 a3 ___flush_dcache_page a2 a3
dsync dsync
retw abi_ret_default
ENDPROC(__flush_dcache_page) ENDPROC(__flush_dcache_page)
...@@ -385,12 +385,12 @@ ENDPROC(__flush_dcache_page) ...@@ -385,12 +385,12 @@ ENDPROC(__flush_dcache_page)
ENTRY(__invalidate_icache_range) ENTRY(__invalidate_icache_range)
entry sp, 16 abi_entry_default
___invalidate_icache_range a2 a3 a4 ___invalidate_icache_range a2 a3 a4
isync isync
retw abi_ret_default
ENDPROC(__invalidate_icache_range) ENDPROC(__invalidate_icache_range)
...@@ -400,12 +400,12 @@ ENDPROC(__invalidate_icache_range) ...@@ -400,12 +400,12 @@ ENDPROC(__invalidate_icache_range)
ENTRY(__flush_invalidate_dcache_range) ENTRY(__flush_invalidate_dcache_range)
entry sp, 16 abi_entry_default
___flush_invalidate_dcache_range a2 a3 a4 ___flush_invalidate_dcache_range a2 a3 a4
dsync dsync
retw abi_ret_default
ENDPROC(__flush_invalidate_dcache_range) ENDPROC(__flush_invalidate_dcache_range)
...@@ -415,12 +415,12 @@ ENDPROC(__flush_invalidate_dcache_range) ...@@ -415,12 +415,12 @@ ENDPROC(__flush_invalidate_dcache_range)
ENTRY(__flush_dcache_range) ENTRY(__flush_dcache_range)
entry sp, 16 abi_entry_default
___flush_dcache_range a2 a3 a4 ___flush_dcache_range a2 a3 a4
dsync dsync
retw abi_ret_default
ENDPROC(__flush_dcache_range) ENDPROC(__flush_dcache_range)
...@@ -430,11 +430,11 @@ ENDPROC(__flush_dcache_range) ...@@ -430,11 +430,11 @@ ENDPROC(__flush_dcache_range)
ENTRY(__invalidate_dcache_range) ENTRY(__invalidate_dcache_range)
entry sp, 16 abi_entry_default
___invalidate_dcache_range a2 a3 a4 ___invalidate_dcache_range a2 a3 a4
retw abi_ret_default
ENDPROC(__invalidate_dcache_range) ENDPROC(__invalidate_dcache_range)
...@@ -444,12 +444,12 @@ ENDPROC(__invalidate_dcache_range) ...@@ -444,12 +444,12 @@ ENDPROC(__invalidate_dcache_range)
ENTRY(__invalidate_icache_all) ENTRY(__invalidate_icache_all)
entry sp, 16 abi_entry_default
___invalidate_icache_all a2 a3 ___invalidate_icache_all a2 a3
isync isync
retw abi_ret_default
ENDPROC(__invalidate_icache_all) ENDPROC(__invalidate_icache_all)
...@@ -459,12 +459,12 @@ ENDPROC(__invalidate_icache_all) ...@@ -459,12 +459,12 @@ ENDPROC(__invalidate_icache_all)
ENTRY(__flush_invalidate_dcache_all) ENTRY(__flush_invalidate_dcache_all)
entry sp, 16 abi_entry_default
___flush_invalidate_dcache_all a2 a3 ___flush_invalidate_dcache_all a2 a3
dsync dsync
retw abi_ret_default
ENDPROC(__flush_invalidate_dcache_all) ENDPROC(__flush_invalidate_dcache_all)
...@@ -474,11 +474,11 @@ ENDPROC(__flush_invalidate_dcache_all) ...@@ -474,11 +474,11 @@ ENDPROC(__flush_invalidate_dcache_all)
ENTRY(__invalidate_dcache_all) ENTRY(__invalidate_dcache_all)
entry sp, 16 abi_entry_default
___invalidate_dcache_all a2 a3 ___invalidate_dcache_all a2 a3
dsync dsync
retw abi_ret_default
ENDPROC(__invalidate_dcache_all) ENDPROC(__invalidate_dcache_all)
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment