Commit 2f26e424 authored by Linus Torvalds

Merge tag 'loongarch-6.2' of git://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson

Pull LoongArch updates from Huacai Chen:

 - Switch to relative exception tables

 - Add unaligned access support

 - Add alternative runtime patching mechanism

 - Add FDT booting support from efi system table

 - Add suspend/hibernation (ACPI S3/S4) support

 - Add basic STACKPROTECTOR support

 - Add ftrace (function tracer) support

 - Update the default config file

* tag 'loongarch-6.2' of git://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson: (24 commits)
  LoongArch: Update Loongson-3 default config file
  LoongArch: modules/ftrace: Initialize PLT at load time
  LoongArch/ftrace: Add HAVE_FUNCTION_GRAPH_RET_ADDR_PTR support
  LoongArch/ftrace: Add HAVE_DYNAMIC_FTRACE_WITH_ARGS support
  LoongArch/ftrace: Add HAVE_DYNAMIC_FTRACE_WITH_REGS support
  LoongArch/ftrace: Add dynamic function graph tracer support
  LoongArch/ftrace: Add dynamic function tracer support
  LoongArch/ftrace: Add recordmcount support
  LoongArch/ftrace: Add basic support
  LoongArch: module: Use got/plt section indices for relocations
  LoongArch: Add basic STACKPROTECTOR support
  LoongArch: Add hibernation (ACPI S4) support
  LoongArch: Add suspend (ACPI S3) support
  LoongArch: Add processing ISA Node in DeviceTree
  LoongArch: Add FDT booting support from efi system table
  LoongArch: Use alternative to optimize libraries
  LoongArch: Add alternative runtime patching mechanism
  LoongArch: Add unaligned access support
  LoongArch: BPF: Add BPF exception tables
  LoongArch: Remove the .fixup section usage
  ...
parents 96bab5b9 5535f4f7
@@ -436,8 +436,8 @@ ignore-unaligned-usertrap
 On architectures where unaligned accesses cause traps, and where this
 feature is supported (``CONFIG_SYSCTL_ARCH_UNALIGN_NO_WARN``;
-currently, ``arc`` and ``ia64``), controls whether all unaligned traps
-are logged.
+currently, ``arc``, ``ia64`` and ``loongarch``), controls whether all
+unaligned traps are logged.

 = =============================================================
 0 Log all unaligned accesses.
@@ -1492,8 +1492,8 @@ unaligned-trap
 On architectures where unaligned accesses cause traps, and where this
 feature is supported (``CONFIG_SYSCTL_ARCH_UNALIGN_ALLOW``; currently,
-``arc`` and ``parisc``), controls whether unaligned traps are caught
-and emulated (instead of failing).
+``arc``, ``parisc`` and ``loongarch``), controls whether unaligned traps
+are caught and emulated (instead of failing).

 = ========================================================
 0 Do not emulate unaligned accesses.
......
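Both knobs documented above are runtime-writable through procfs. A minimal userspace sketch (assuming the standard /proc/sys/kernel paths from the documentation; error handling trimmed):

	/* Enable unaligned-access emulation and silence per-access logging. */
	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/proc/sys/kernel/unaligned-trap", "w");
		if (f) {
			fputs("1", f);	/* 1: catch and emulate unaligned traps */
			fclose(f);
		}
		f = fopen("/proc/sys/kernel/ignore-unaligned-usertrap", "w");
		if (f) {
			fputs("1", f);	/* 1: do not log unaligned accesses */
			fclose(f);
		}
		return 0;
	}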
@@ -58,6 +58,7 @@ config LOONGARCH
 	select ARCH_WANTS_NO_INSTR
 	select BUILDTIME_TABLE_SORT
 	select COMMON_CLK
+	select CPU_PM
 	select EFI
 	select GENERIC_CLOCKEVENTS
 	select GENERIC_CMOS_UPDATE
@@ -86,11 +87,18 @@ config LOONGARCH
 	select HAVE_ARCH_TRANSPARENT_HUGEPAGE
 	select HAVE_ASM_MODVERSIONS
 	select HAVE_CONTEXT_TRACKING_USER
+	select HAVE_C_RECORDMCOUNT
 	select HAVE_DEBUG_STACKOVERFLOW
 	select HAVE_DMA_CONTIGUOUS
+	select HAVE_DYNAMIC_FTRACE
+	select HAVE_DYNAMIC_FTRACE_WITH_ARGS
+	select HAVE_DYNAMIC_FTRACE_WITH_REGS
 	select HAVE_EBPF_JIT
 	select HAVE_EXIT_THREAD
 	select HAVE_FAST_GUP
+	select HAVE_FTRACE_MCOUNT_RECORD
+	select HAVE_FUNCTION_GRAPH_TRACER
+	select HAVE_FUNCTION_TRACER
 	select HAVE_GENERIC_VDSO
 	select HAVE_IOREMAP_PROT
 	select HAVE_IRQ_EXIT_ON_IRQ_STACK
@@ -104,6 +112,7 @@ config LOONGARCH
 	select HAVE_REGS_AND_STACK_ACCESS_API
 	select HAVE_RSEQ
 	select HAVE_SETUP_PER_CPU_AREA if NUMA
+	select HAVE_STACKPROTECTOR
 	select HAVE_SYSCALL_TRACEPOINTS
 	select HAVE_TIF_NOHZ
 	select HAVE_VIRT_CPU_ACCOUNTING_GEN if !SMP
@@ -113,6 +122,8 @@ config LOONGARCH
 	select MODULES_USE_ELF_RELA if MODULES
 	select NEED_PER_CPU_EMBED_FIRST_CHUNK
 	select NEED_PER_CPU_PAGE_FIRST_CHUNK
+	select OF
+	select OF_EARLY_FLATTREE
 	select PCI
 	select PCI_DOMAINS_GENERIC
 	select PCI_ECAM if ACPI
@@ -123,6 +134,8 @@ config LOONGARCH
 	select RTC_LIB
 	select SMP
 	select SPARSE_IRQ
+	select SYSCTL_ARCH_UNALIGN_ALLOW
+	select SYSCTL_ARCH_UNALIGN_NO_WARN
 	select SYSCTL_EXCEPTION_TRACE
 	select SWIOTLB
 	select TRACE_IRQFLAGS_SUPPORT
@@ -516,6 +529,13 @@ config ARCH_MMAP_RND_BITS_MAX

 menu "Power management options"

+config ARCH_SUSPEND_POSSIBLE
+	def_bool y
+
+config ARCH_HIBERNATION_POSSIBLE
+	def_bool y
+
+source "kernel/power/Kconfig"
+
 source "drivers/acpi/Kconfig"

 endmenu
......
@@ -25,6 +25,11 @@ endif
 32bit-emul = elf32loongarch
 64bit-emul = elf64loongarch

+ifdef CONFIG_DYNAMIC_FTRACE
+KBUILD_CPPFLAGS += -DCC_USING_PATCHABLE_FUNCTION_ENTRY
+CC_FLAGS_FTRACE := -fpatchable-function-entry=2
+endif
+
 ifdef CONFIG_64BIT
 tool-archpref = $(64bit-tool-archpref)
 UTS_MACHINE := loongarch64
@@ -104,6 +109,9 @@ endif
 libs-y += arch/loongarch/lib/
 libs-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a

+# suspend and hibernation support
+drivers-$(CONFIG_PM) += arch/loongarch/power/
+
 ifeq ($(KBUILD_EXTMOD),)
 prepare: vdso_prepare
 vdso_prepare: prepare0
......
@@ -34,12 +34,13 @@ CONFIG_SYSFS_DEPRECATED=y
 CONFIG_RELAY=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_EXPERT=y
-CONFIG_USERFAULTFD=y
+CONFIG_KALLSYMS_ALL=y
 CONFIG_PERF_EVENTS=y
-# CONFIG_COMPAT_BRK is not set
 CONFIG_LOONGARCH=y
 CONFIG_64BIT=y
 CONFIG_MACH_LOONGSON64=y
+CONFIG_PAGE_SIZE_16KB=y
+CONFIG_HZ_250=y
 CONFIG_DMI=y
 CONFIG_EFI=y
 CONFIG_SMP=y
@@ -47,14 +48,14 @@ CONFIG_HOTPLUG_CPU=y
 CONFIG_NR_CPUS=64
 CONFIG_NUMA=y
 CONFIG_KEXEC=y
-CONFIG_PAGE_SIZE_16KB=y
-CONFIG_HZ_250=y
+CONFIG_SUSPEND=y
+CONFIG_HIBERNATION=y
 CONFIG_ACPI=y
 CONFIG_ACPI_SPCR_TABLE=y
+CONFIG_ACPI_HOTPLUG_CPU=y
 CONFIG_ACPI_TAD=y
 CONFIG_ACPI_DOCK=y
 CONFIG_ACPI_IPMI=m
-CONFIG_ACPI_HOTPLUG_CPU=y
 CONFIG_ACPI_PCI_SLOT=y
 CONFIG_ACPI_HOTPLUG_MEMORY=y
 CONFIG_EFI_ZBOOT=y
@@ -73,17 +74,19 @@ CONFIG_UNIXWARE_DISKLABEL=y
 CONFIG_IOSCHED_BFQ=y
 CONFIG_BFQ_GROUP_IOSCHED=y
 CONFIG_BINFMT_MISC=m
-CONFIG_MEMORY_HOTPLUG=y
-CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE=y
-CONFIG_MEMORY_HOTREMOVE=y
-CONFIG_KSM=y
-CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_ZPOOL=y
 CONFIG_ZSWAP=y
 CONFIG_ZSWAP_COMPRESSOR_DEFAULT_ZSTD=y
-CONFIG_ZPOOL=y
 CONFIG_ZBUD=y
 CONFIG_Z3FOLD=y
 CONFIG_ZSMALLOC=m
+# CONFIG_COMPAT_BRK is not set
+CONFIG_MEMORY_HOTPLUG=y
+CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE=y
+CONFIG_MEMORY_HOTREMOVE=y
+CONFIG_KSM=y
+CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_USERFAULTFD=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
@@ -118,7 +121,6 @@ CONFIG_NETFILTER=y
 CONFIG_BRIDGE_NETFILTER=m
 CONFIG_NETFILTER_NETLINK_LOG=m
 CONFIG_NF_CONNTRACK=m
-CONFIG_NF_LOG_NETDEV=m
 CONFIG_NF_CONNTRACK_AMANDA=m
 CONFIG_NF_CONNTRACK_FTP=m
 CONFIG_NF_CONNTRACK_NETBIOS_NS=m
@@ -416,6 +418,7 @@ CONFIG_SCSI_VIRTIO=m
 CONFIG_ATA=y
 CONFIG_SATA_AHCI=y
 CONFIG_SATA_AHCI_PLATFORM=y
+CONFIG_AHCI_DWC=y
 CONFIG_PATA_ATIIXP=y
 CONFIG_PATA_PCMCIA=m
 CONFIG_MD=y
@@ -469,13 +472,11 @@ CONFIG_VIRTIO_NET=m
 # CONFIG_NET_VENDOR_ARC is not set
 # CONFIG_NET_VENDOR_ATHEROS is not set
 CONFIG_BNX2=y
-# CONFIG_NET_VENDOR_BROCADE is not set
 # CONFIG_NET_VENDOR_CAVIUM is not set
 CONFIG_CHELSIO_T1=m
 CONFIG_CHELSIO_T1_1G=y
 CONFIG_CHELSIO_T3=m
 CONFIG_CHELSIO_T4=m
-# CONFIG_NET_VENDOR_CIRRUS is not set
 # CONFIG_NET_VENDOR_CISCO is not set
 # CONFIG_NET_VENDOR_DEC is not set
 # CONFIG_NET_VENDOR_DLINK is not set
@@ -496,6 +497,7 @@ CONFIG_IXGBE=y
 # CONFIG_NET_VENDOR_NVIDIA is not set
 # CONFIG_NET_VENDOR_OKI is not set
 # CONFIG_NET_VENDOR_QLOGIC is not set
+# CONFIG_NET_VENDOR_BROCADE is not set
 # CONFIG_NET_VENDOR_QUALCOMM is not set
 # CONFIG_NET_VENDOR_RDC is not set
 CONFIG_8139CP=m
@@ -505,9 +507,9 @@ CONFIG_R8169=y
 # CONFIG_NET_VENDOR_ROCKER is not set
 # CONFIG_NET_VENDOR_SAMSUNG is not set
 # CONFIG_NET_VENDOR_SEEQ is not set
-# CONFIG_NET_VENDOR_SOLARFLARE is not set
 # CONFIG_NET_VENDOR_SILAN is not set
 # CONFIG_NET_VENDOR_SIS is not set
+# CONFIG_NET_VENDOR_SOLARFLARE is not set
 # CONFIG_NET_VENDOR_SMSC is not set
 CONFIG_STMMAC_ETH=y
 # CONFIG_NET_VENDOR_SUN is not set
@@ -588,6 +590,7 @@ CONFIG_SERIAL_8250_EXTENDED=y
 CONFIG_SERIAL_8250_MANY_PORTS=y
 CONFIG_SERIAL_8250_SHARE_IRQ=y
 CONFIG_SERIAL_8250_RSA=y
+CONFIG_SERIAL_OF_PLATFORM=y
 CONFIG_SERIAL_NONSTANDARD=y
 CONFIG_PRINTER=m
 CONFIG_VIRTIO_CONSOLE=y
@@ -602,6 +605,11 @@ CONFIG_I2C_GPIO=y
 CONFIG_SPI=y
 CONFIG_GPIO_SYSFS=y
 CONFIG_GPIO_LOONGSON=y
+CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_RESTART=y
+CONFIG_POWER_RESET_SYSCON=y
+CONFIG_POWER_RESET_SYSCON_POWEROFF=y
+CONFIG_SYSCON_REBOOT_MODE=y
 CONFIG_SENSORS_LM75=m
 CONFIG_SENSORS_LM93=m
 CONFIG_SENSORS_W83795=m
@@ -609,16 +617,16 @@ CONFIG_SENSORS_W83627HF=m
 CONFIG_RC_CORE=m
 CONFIG_LIRC=y
 CONFIG_RC_DECODERS=y
+CONFIG_IR_IMON_DECODER=m
+CONFIG_IR_JVC_DECODER=m
+CONFIG_IR_MCE_KBD_DECODER=m
 CONFIG_IR_NEC_DECODER=m
 CONFIG_IR_RC5_DECODER=m
 CONFIG_IR_RC6_DECODER=m
-CONFIG_IR_JVC_DECODER=m
-CONFIG_IR_SONY_DECODER=m
 CONFIG_IR_SANYO_DECODER=m
 CONFIG_IR_SHARP_DECODER=m
-CONFIG_IR_MCE_KBD_DECODER=m
+CONFIG_IR_SONY_DECODER=m
 CONFIG_IR_XMP_DECODER=m
-CONFIG_IR_IMON_DECODER=m
 CONFIG_MEDIA_SUPPORT=m
 CONFIG_MEDIA_USB_SUPPORT=y
 CONFIG_USB_VIDEO_CLASS=m
@@ -638,6 +646,7 @@ CONFIG_DRM_VIRTIO_GPU=m
 CONFIG_FB=y
 CONFIG_FB_EFI=y
 CONFIG_FB_RADEON=y
+CONFIG_LCD_CLASS_DEVICE=y
 CONFIG_LCD_PLATFORM=m
 # CONFIG_VGA_CONSOLE is not set
 CONFIG_FRAMEBUFFER_CONSOLE=y
@@ -647,7 +656,6 @@ CONFIG_SOUND=y
 CONFIG_SND=y
 CONFIG_SND_SEQUENCER=m
 CONFIG_SND_SEQ_DUMMY=m
-# CONFIG_SND_ISA is not set
 CONFIG_SND_BT87X=m
 CONFIG_SND_BT87X_OVERCLOCK=y
 CONFIG_SND_HDA_INTEL=y
@@ -818,10 +826,6 @@ CONFIG_CRYPTO_USER=m
 # CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
 CONFIG_CRYPTO_PCRYPT=m
 CONFIG_CRYPTO_CRYPTD=m
-CONFIG_CRYPTO_CHACHA20POLY1305=m
-CONFIG_CRYPTO_HMAC=y
-CONFIG_CRYPTO_VMAC=m
-CONFIG_CRYPTO_WP512=m
 CONFIG_CRYPTO_ANUBIS=m
 CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_CAST5=m
@@ -831,6 +835,9 @@ CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_VMAC=m
+CONFIG_CRYPTO_WP512=m
 CONFIG_CRYPTO_DEFLATE=m
 CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_842=m
@@ -844,6 +851,7 @@ CONFIG_CRYPTO_DEV_VIRTIO=m
 CONFIG_PRINTK_TIME=y
 CONFIG_STRIP_ASM_SYMS=y
 CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_FS=y
 # CONFIG_SCHED_DEBUG is not set
 CONFIG_SCHEDSTATS=y
 # CONFIG_DEBUG_PREEMPT is not set
......
@@ -35,4 +35,14 @@ extern struct list_head acpi_wakeup_device_list;

 #define ACPI_TABLE_UPGRADE_MAX_PHYS ARCH_LOW_ADDRESS_LIMIT

+extern int loongarch_acpi_suspend(void);
+extern int (*acpi_suspend_lowlevel)(void);
+extern void loongarch_suspend_enter(void);
+
+static inline unsigned long acpi_get_wakeup_address(void)
+{
+	extern void loongarch_wakeup_start(void);
+
+	return (unsigned long)loongarch_wakeup_start;
+}
+
 #endif /* _ASM_LOONGARCH_ACPI_H */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_ALTERNATIVE_ASM_H
#define _ASM_ALTERNATIVE_ASM_H
#ifdef __ASSEMBLY__
#include <asm/asm.h>
/*
* Issue one struct alt_instr descriptor entry (need to put it into
* the section .altinstructions, see below). This entry contains
* enough information for the alternatives patching code to patch an
* instruction. See apply_alternatives().
*/
.macro altinstruction_entry orig alt feature orig_len alt_len
.long \orig - .
.long \alt - .
.short \feature
.byte \orig_len
.byte \alt_len
.endm
/*
* Define an alternative between two instructions. If @feature is
* present, early code in apply_alternatives() replaces @oldinstr with
* @newinstr. ".fill" directive takes care of proper instruction padding
* in case @newinstr is longer than @oldinstr.
*/
.macro ALTERNATIVE oldinstr, newinstr, feature
140 :
\oldinstr
141 :
.fill - (((144f-143f)-(141b-140b)) > 0) * ((144f-143f)-(141b-140b)) / 4, 4, 0x03400000
142 :
.pushsection .altinstructions, "a"
altinstruction_entry 140b, 143f, \feature, 142b-140b, 144f-143f
.popsection
.subsection 1
143 :
\newinstr
144 :
.previous
.endm
#define old_len (141b-140b)
#define new_len1 (144f-143f)
#define new_len2 (145f-144f)
#define alt_max_short(a, b) ((a) ^ (((a) ^ (b)) & -(-((a) < (b)))))
/*
* Same as ALTERNATIVE macro above but for two alternatives. If CPU
* has @feature1, it replaces @oldinstr with @newinstr1. If CPU has
 * @feature2, it replaces @oldinstr with @newinstr2.
*/
.macro ALTERNATIVE_2 oldinstr, newinstr1, feature1, newinstr2, feature2
140 :
\oldinstr
141 :
.fill - ((alt_max_short(new_len1, new_len2) - (old_len)) > 0) * \
(alt_max_short(new_len1, new_len2) - (old_len)) / 4, 4, 0x03400000
142 :
.pushsection .altinstructions, "a"
altinstruction_entry 140b, 143f, \feature1, 142b-140b, 144f-143f
altinstruction_entry 140b, 144f, \feature2, 142b-140b, 145f-144f
.popsection
.subsection 1
143 :
\newinstr1
144 :
\newinstr2
145 :
.previous
.endm
#endif /* __ASSEMBLY__ */
#endif /* _ASM_ALTERNATIVE_ASM_H */
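A worked example of the .fill padding expression in the macros above, relying on GAS evaluating a true comparison to -1: with a 4-byte @oldinstr and a 12-byte @newinstr,

	/* count = -(((144f-143f) - (141b-140b)) > 0) * ((144f-143f) - (141b-140b)) / 4
	 *       = -(-1) * (12 - 4) / 4
	 *       = 2
	 * i.e. two NOP words (0x03400000) pad the original site out to 12 bytes.
	 * When @oldinstr is the longer one, the comparison yields 0 and no
	 * padding is emitted. */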
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_ALTERNATIVE_H
#define _ASM_ALTERNATIVE_H
#ifndef __ASSEMBLY__
#include <linux/types.h>
#include <linux/stddef.h>
#include <linux/stringify.h>
#include <asm/asm.h>
struct alt_instr {
s32 instr_offset; /* offset to original instruction */
s32 replace_offset; /* offset to replacement instruction */
u16 feature; /* feature bit set for replacement */
u8 instrlen; /* length of original instruction */
u8 replacementlen; /* length of new instruction */
} __packed;
/*
* Debug flag that can be tested to see whether alternative
* instructions were patched in already:
*/
extern int alternatives_patched;
extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
extern void alternative_instructions(void);
extern void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
#define b_replacement(num) "664"#num
#define e_replacement(num) "665"#num
#define alt_end_marker "663"
#define alt_slen "662b-661b"
#define alt_total_slen alt_end_marker"b-661b"
#define alt_rlen(num) e_replacement(num)"f-"b_replacement(num)"f"
#define __OLDINSTR(oldinstr, num) \
"661:\n\t" oldinstr "\n662:\n" \
".fill -(((" alt_rlen(num) ")-(" alt_slen ")) > 0) * " \
"((" alt_rlen(num) ")-(" alt_slen ")) / 4, 4, 0x03400000\n"
#define OLDINSTR(oldinstr, num) \
__OLDINSTR(oldinstr, num) \
alt_end_marker ":\n"
#define alt_max_short(a, b) "((" a ") ^ (((" a ") ^ (" b ")) & -(-((" a ") < (" b ")))))"
/*
* Pad the second replacement alternative with additional NOPs if it is
* additionally longer than the first replacement alternative.
*/
#define OLDINSTR_2(oldinstr, num1, num2) \
"661:\n\t" oldinstr "\n662:\n" \
".fill -((" alt_max_short(alt_rlen(num1), alt_rlen(num2)) " - (" alt_slen ")) > 0) * " \
"(" alt_max_short(alt_rlen(num1), alt_rlen(num2)) " - (" alt_slen ")) / 4, " \
"4, 0x03400000\n" \
alt_end_marker ":\n"
#define ALTINSTR_ENTRY(feature, num) \
" .long 661b - .\n" /* label */ \
" .long " b_replacement(num)"f - .\n" /* new instruction */ \
" .short " __stringify(feature) "\n" /* feature bit */ \
" .byte " alt_total_slen "\n" /* source len */ \
" .byte " alt_rlen(num) "\n" /* replacement len */
#define ALTINSTR_REPLACEMENT(newinstr, feature, num) /* replacement */ \
b_replacement(num)":\n\t" newinstr "\n" e_replacement(num) ":\n\t"
/* alternative assembly primitive: */
#define ALTERNATIVE(oldinstr, newinstr, feature) \
OLDINSTR(oldinstr, 1) \
".pushsection .altinstructions,\"a\"\n" \
ALTINSTR_ENTRY(feature, 1) \
".popsection\n" \
".subsection 1\n" \
ALTINSTR_REPLACEMENT(newinstr, feature, 1) \
".previous\n"
#define ALTERNATIVE_2(oldinstr, newinstr1, feature1, newinstr2, feature2)\
OLDINSTR_2(oldinstr, 1, 2) \
".pushsection .altinstructions,\"a\"\n" \
ALTINSTR_ENTRY(feature1, 1) \
ALTINSTR_ENTRY(feature2, 2) \
".popsection\n" \
".subsection 1\n" \
ALTINSTR_REPLACEMENT(newinstr1, feature1, 1) \
ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \
".previous\n"
/*
* Alternative instructions for different CPU types or capabilities.
*
 * This allows the use of optimized instructions even on generic binary
 * kernels.
 *
 * The length of oldinstr must be greater than or equal to the length of
 * newinstr; it can be padded with nops as needed.
 *
 * For non-barrier-like inlines please define new variants
 * without volatile and memory clobber.
*/
#define alternative(oldinstr, newinstr, feature) \
(asm volatile (ALTERNATIVE(oldinstr, newinstr, feature) : : : "memory"))
#define alternative_2(oldinstr, newinstr1, feature1, newinstr2, feature2) \
(asm volatile(ALTERNATIVE_2(oldinstr, newinstr1, feature1, newinstr2, feature2) ::: "memory"))
#endif /* __ASSEMBLY__ */
#endif /* _ASM_ALTERNATIVE_H */
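As a usage sketch (not taken from this series): the C-level macro drops straight into inline assembly, and the feature bit decides at boot which instruction survives. FEATURE_X below is a placeholder, not a real LoongArch feature number:

	/* Hypothetical: execute "dbar 0" by default, but patch the site to a
	 * NOP at boot when the CPU advertises FEATURE_X. */
	static inline void relaxed_barrier(void)
	{
		alternative("dbar 0", "nop", FEATURE_X);
	}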
/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef __ASM_ASM_EXTABLE_H
#define __ASM_ASM_EXTABLE_H
#define EX_TYPE_NONE 0
#define EX_TYPE_FIXUP 1
#define EX_TYPE_UACCESS_ERR_ZERO 2
#define EX_TYPE_BPF 3
#ifdef __ASSEMBLY__
#define __ASM_EXTABLE_RAW(insn, fixup, type, data) \
.pushsection __ex_table, "a"; \
.balign 4; \
.long ((insn) - .); \
.long ((fixup) - .); \
.short (type); \
.short (data); \
.popsection;
.macro _asm_extable, insn, fixup
__ASM_EXTABLE_RAW(\insn, \fixup, EX_TYPE_FIXUP, 0)
.endm
#else /* __ASSEMBLY__ */
#include <linux/bits.h>
#include <linux/stringify.h>
#include <asm/gpr-num.h>
#define __ASM_EXTABLE_RAW(insn, fixup, type, data) \
".pushsection __ex_table, \"a\"\n" \
".balign 4\n" \
".long ((" insn ") - .)\n" \
".long ((" fixup ") - .)\n" \
".short (" type ")\n" \
".short (" data ")\n" \
".popsection\n"
#define _ASM_EXTABLE(insn, fixup) \
__ASM_EXTABLE_RAW(#insn, #fixup, __stringify(EX_TYPE_FIXUP), "0")
#define EX_DATA_REG_ERR_SHIFT 0
#define EX_DATA_REG_ERR GENMASK(4, 0)
#define EX_DATA_REG_ZERO_SHIFT 5
#define EX_DATA_REG_ZERO GENMASK(9, 5)
#define EX_DATA_REG(reg, gpr) \
"((.L__gpr_num_" #gpr ") << " __stringify(EX_DATA_REG_##reg##_SHIFT) ")"
#define _ASM_EXTABLE_UACCESS_ERR_ZERO(insn, fixup, err, zero) \
__DEFINE_ASM_GPR_NUMS \
__ASM_EXTABLE_RAW(#insn, #fixup, \
__stringify(EX_TYPE_UACCESS_ERR_ZERO), \
"(" \
EX_DATA_REG(ERR, err) " | " \
EX_DATA_REG(ZERO, zero) \
")")
#define _ASM_EXTABLE_UACCESS_ERR(insn, fixup, err) \
_ASM_EXTABLE_UACCESS_ERR_ZERO(insn, fixup, err, zero)
#endif /* __ASSEMBLY__ */
#endif /* __ASM_ASM_EXTABLE_H */
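The payoff of these helpers shows up later in this series, where the open-coded .fixup/__ex_table stanzas in futex.h and uaccess.h are deleted. A condensed sketch of the pattern, modeled on the __get_user_asm() change below (the ld.w and operand layout are illustrative):

	/* On a fault at 1b, control resumes at 2b with err set to -EFAULT
	 * and the destination register zeroed -- no hand-written fixup. */
	#define load_word_user(dst, ptr, err)					\
		asm volatile("1:	ld.w	%1, %2	\n"			\
			     "2:			\n"			\
			     _ASM_EXTABLE_UACCESS_ERR_ZERO(1b, 2b, %0, %1)	\
			     : "+r" (err), "=r" (dst)				\
			     : "m" (*(ptr)))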
@@ -32,6 +32,7 @@ struct loongson_system_configuration {
 	int cores_per_node;
 	int cores_per_package;
 	unsigned long cores_io_master;
+	unsigned long suspend_addr;
 	const char *cpuname;
 };
......
/* SPDX-License-Identifier: GPL-2.0 */
/*
* This is included by init/main.c to check for architecture-dependent bugs.
*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#ifndef _ASM_BUGS_H
#define _ASM_BUGS_H
#include <asm/cpu.h>
#include <asm/cpu-info.h>
extern void check_bugs(void);
#endif /* _ASM_BUGS_H */
@@ -9,6 +9,7 @@
 void __init efi_init(void);
 void __init efi_runtime_init(void);
+void __init *efi_fdt_pointer(void);
 void efifb_setup_from_dmi(struct screen_info *si, const char *opt);

 #define ARCH_EFI_IRQ_FLAGS_MASK 0x00000004 /* Bit 2: CSR.CRMD.IE */
......
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_LOONGARCH_EXTABLE_H
#define _ASM_LOONGARCH_EXTABLE_H
/*
* The exception table consists of pairs of relative offsets: the first
* is the relative offset to an instruction that is allowed to fault,
* and the second is the relative offset at which the program should
* continue. No registers are modified, so it is entirely up to the
* continuation code to figure out what to do.
*
* All the routines below use bits of fixup code that are out of line
* with the main instruction path. This means when everything is well,
* we don't even have to jump over them. Further, they do not intrude
* on our cache or tlb entries.
*/
struct exception_table_entry {
int insn, fixup;
short type, data;
};
#define ARCH_HAS_RELATIVE_EXTABLE
#define swap_ex_entry_fixup(a, b, tmp, delta) \
do { \
(a)->fixup = (b)->fixup + (delta); \
(b)->fixup = (tmp).fixup - (delta); \
(a)->type = (b)->type; \
(b)->type = (tmp).type; \
(a)->data = (b)->data; \
(b)->data = (tmp).data; \
} while (0)
#ifdef CONFIG_BPF_JIT
bool ex_handler_bpf(const struct exception_table_entry *ex, struct pt_regs *regs);
#else
static inline
bool ex_handler_bpf(const struct exception_table_entry *ex, struct pt_regs *regs)
{
return false;
}
#endif /* !CONFIG_BPF_JIT */
bool fixup_exception(struct pt_regs *regs);
#endif
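Because each entry stores self-relative offsets, resolving one back to an absolute address is a single addition. A sketch of the arithmetic (the real lookup lives in the fixup_exception() implementation):

	static inline unsigned long
	ex_fixup_addr(const struct exception_table_entry *ex)
	{
		/* the stored value is (target - &field), so adding the
		 * field's own address recovers the target */
		return (unsigned long)&ex->fixup + ex->fixup;
	}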
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2022 Loongson Technology Corporation Limited
*/
#ifndef _ASM_LOONGARCH_FTRACE_H
#define _ASM_LOONGARCH_FTRACE_H
#define FTRACE_PLT_IDX 0
#define FTRACE_REGS_PLT_IDX 1
#define NR_FTRACE_PLTS 2
#define GRAPH_FAKE_OFFSET (sizeof(struct pt_regs) - offsetof(struct pt_regs, regs[1]))
#ifdef CONFIG_FUNCTION_TRACER
#define MCOUNT_INSN_SIZE 4 /* sizeof mcount call */
#ifndef __ASSEMBLY__
#ifndef CONFIG_DYNAMIC_FTRACE
#define mcount _mcount
extern void _mcount(void);
extern void prepare_ftrace_return(unsigned long self_addr, unsigned long callsite_sp, unsigned long old);
#else
struct dyn_ftrace;
struct dyn_arch_ftrace { };
#define ARCH_SUPPORTS_FTRACE_OPS 1
#define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
#define ftrace_init_nop ftrace_init_nop
int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec);
static inline unsigned long ftrace_call_adjust(unsigned long addr)
{
return addr;
}
void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent);
#endif /* CONFIG_DYNAMIC_FTRACE */
#ifdef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
struct ftrace_ops;
struct ftrace_regs {
struct pt_regs regs;
};
static __always_inline struct pt_regs *arch_ftrace_get_regs(struct ftrace_regs *fregs)
{
return &fregs->regs;
}
#define ftrace_graph_func ftrace_graph_func
void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *op, struct ftrace_regs *fregs);
#endif
#endif /* __ASSEMBLY__ */
#endif /* CONFIG_FUNCTION_TRACER */
#endif /* _ASM_LOONGARCH_FTRACE_H */
@@ -7,6 +7,7 @@
 #include <linux/futex.h>
 #include <linux/uaccess.h>

+#include <asm/asm-extable.h>
 #include <asm/barrier.h>
 #include <asm/errno.h>

@@ -18,18 +19,11 @@
 	"2: sc.w $t0, %2 \n" \
 	" beqz $t0, 1b \n" \
 	"3: \n" \
-	" .section .fixup,\"ax\" \n" \
-	"4: li.w %0, %6 \n" \
-	" b 3b \n" \
-	" .previous \n" \
-	" .section __ex_table,\"a\" \n" \
-	" "__UA_ADDR "\t1b, 4b \n" \
-	" "__UA_ADDR "\t2b, 4b \n" \
-	" .previous \n" \
+	_ASM_EXTABLE_UACCESS_ERR(1b, 3b, %0) \
+	_ASM_EXTABLE_UACCESS_ERR(2b, 3b, %0) \
 	: "=r" (ret), "=&r" (oldval), \
 	  "=ZC" (*uaddr) \
-	: "0" (0), "ZC" (*uaddr), "Jr" (oparg), \
-	  "i" (-EFAULT) \
+	: "0" (0), "ZC" (*uaddr), "Jr" (oparg) \
 	: "memory", "t0"); \
 }
@@ -86,17 +80,10 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval, u32 newv
 	" beqz $t0, 1b \n"
 	"3: \n"
 	__WEAK_LLSC_MB
-	" .section .fixup,\"ax\" \n"
-	"4: li.d %0, %6 \n"
-	" b 3b \n"
-	" .previous \n"
-	" .section __ex_table,\"a\" \n"
-	" "__UA_ADDR "\t1b, 4b \n"
-	" "__UA_ADDR "\t2b, 4b \n"
-	" .previous \n"
+	_ASM_EXTABLE_UACCESS_ERR(1b, 3b, %0)
+	_ASM_EXTABLE_UACCESS_ERR(2b, 3b, %0)
 	: "+r" (ret), "=&r" (val), "=ZC" (*uaddr)
-	: "ZC" (*uaddr), "Jr" (oldval), "Jr" (newval),
-	  "i" (-EFAULT)
+	: "ZC" (*uaddr), "Jr" (oldval), "Jr" (newval)
 	: "memory", "t0");

 	*uval = val;
......
/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef __ASM_GPR_NUM_H
#define __ASM_GPR_NUM_H
#ifdef __ASSEMBLY__
.equ .L__gpr_num_zero, 0
.irp num,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
.equ .L__gpr_num_$r\num, \num
.endr
#else /* __ASSEMBLY__ */
#define __DEFINE_ASM_GPR_NUMS \
" .equ .L__gpr_num_zero, 0\n" \
" .irp num,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31\n" \
" .equ .L__gpr_num_$r\\num, \\num\n" \
" .endr\n" \
#endif /* __ASSEMBLY__ */
#endif /* __ASM_GPR_NUM_H */
@@ -8,14 +8,17 @@
 #include <linux/types.h>
 #include <asm/asm.h>

+#define INSN_NOP 0x03400000
 #define INSN_BREAK 0x002a0000

 #define ADDR_IMMMASK_LU52ID 0xFFF0000000000000
 #define ADDR_IMMMASK_LU32ID 0x000FFFFF00000000
+#define ADDR_IMMMASK_LU12IW 0x00000000FFFFF000
 #define ADDR_IMMMASK_ADDU16ID 0x00000000FFFF0000

 #define ADDR_IMMSHIFT_LU52ID 52
 #define ADDR_IMMSHIFT_LU32ID 32
+#define ADDR_IMMSHIFT_LU12IW 12
 #define ADDR_IMMSHIFT_ADDU16ID 16

 #define ADDR_IMM(addr, INSN) ((addr & ADDR_IMMMASK_##INSN) >> ADDR_IMMSHIFT_##INSN)
@@ -28,6 +31,7 @@ enum reg0i26_op {
 enum reg1i20_op {
 	lu12iw_op = 0x0a,
 	lu32id_op = 0x0b,
+	pcaddi_op = 0x0c,
 	pcaddu12i_op = 0x0e,
 	pcaddu18i_op = 0x0f,
 };
@@ -35,6 +39,8 @@ enum reg1i20_op {
 enum reg1i21_op {
 	beqz_op = 0x10,
 	bnez_op = 0x11,
+	bceqz_op = 0x12, /* bits[9:8] = 0x00 */
+	bcnez_op = 0x12, /* bits[9:8] = 0x01 */
 };

 enum reg2_op {
@@ -76,6 +82,10 @@ enum reg2i12_op {
 	ldbu_op = 0xa8,
 	ldhu_op = 0xa9,
 	ldwu_op = 0xaa,
+	flds_op = 0xac,
+	fsts_op = 0xad,
+	fldd_op = 0xae,
+	fstd_op = 0xaf,
 };

 enum reg2i14_op {
@@ -146,6 +156,10 @@ enum reg3_op {
 	ldxbu_op = 0x7040,
 	ldxhu_op = 0x7048,
 	ldxwu_op = 0x7050,
+	fldxs_op = 0x7060,
+	fldxd_op = 0x7068,
+	fstxs_op = 0x7070,
+	fstxd_op = 0x7078,
 	amswapw_op = 0x70c0,
 	amswapd_op = 0x70c1,
 	amaddw_op = 0x70c2,
@@ -307,6 +321,12 @@ static inline bool is_imm_negative(unsigned long val, unsigned int bit)
 	return val & (1UL << (bit - 1));
 }

+static inline bool is_pc_ins(union loongarch_instruction *ip)
+{
+	return ip->reg1i20_format.opcode >= pcaddi_op &&
+	       ip->reg1i20_format.opcode <= pcaddu18i_op;
+}
+
 static inline bool is_branch_ins(union loongarch_instruction *ip)
 {
 	return ip->reg1i21_format.opcode >= beqz_op &&
@@ -331,6 +351,18 @@ static inline bool is_stack_alloc_ins(union loongarch_instruction *ip)
 	       is_imm12_negative(ip->reg2i12_format.immediate);
 }

+int larch_insn_read(void *addr, u32 *insnp);
+int larch_insn_write(void *addr, u32 insn);
+int larch_insn_patch_text(void *addr, u32 insn);
+
+u32 larch_insn_gen_nop(void);
+u32 larch_insn_gen_b(unsigned long pc, unsigned long dest);
+u32 larch_insn_gen_bl(unsigned long pc, unsigned long dest);
+u32 larch_insn_gen_or(enum loongarch_gpr rd, enum loongarch_gpr rj, enum loongarch_gpr rk);
+u32 larch_insn_gen_move(enum loongarch_gpr rd, enum loongarch_gpr rj);
+
+u32 larch_insn_gen_lu12iw(enum loongarch_gpr rd, int imm);
 u32 larch_insn_gen_lu32id(enum loongarch_gpr rd, int imm);
 u32 larch_insn_gen_lu52id(enum loongarch_gpr rd, enum loongarch_gpr rj, int imm);
 u32 larch_insn_gen_jirl(enum loongarch_gpr rd, enum loongarch_gpr rj, unsigned long pc, unsigned long dest);
@@ -345,6 +377,14 @@ static inline bool unsigned_imm_check(unsigned long val, unsigned int bit)
 	return val < (1UL << bit);
 }

+static inline unsigned long sign_extend(unsigned long val, unsigned int idx)
+{
+	if (!is_imm_negative(val, idx + 1))
+		return ((1UL << idx) - 1) & val;
+	else
+		return ~((1UL << idx) - 1) | val;
+}
+
 #define DEF_EMIT_REG0I26_FORMAT(NAME, OP) \
 static inline void emit_##NAME(union loongarch_instruction *insn, \
 			       int offset) \
@@ -566,4 +606,10 @@ static inline void emit_##NAME(union loongarch_instruction *insn, \
 DEF_EMIT_REG3SA2_FORMAT(alsld, alsld_op)

+struct pt_regs;
+
+void emulate_load_store_insn(struct pt_regs *regs, void __user *addr, unsigned int *pc);
+unsigned long unaligned_read(void __user *addr, void *value, unsigned long n, bool sign);
+unsigned long unaligned_write(void __user *addr, unsigned long value, unsigned long n);
+
 #endif /* _ASM_INST_H */
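A worked example for the new sign_extend() helper, where idx names the sign-bit position:

	sign_extend(0x8, 3);	/* bit 3 set   -> 0xfffffffffffffff8, i.e. -8 */
	sign_extend(0x7, 3);	/* bit 3 clear -> 0x7, unchanged */

This is how the new alternative.c decodes branch offsets, e.g. sign_extend((si_h << 16 | si_l) << 2, 27) for the 26-bit b/bl offset field scaled to bytes.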
@@ -136,4 +136,7 @@ typedef enum {
 #define ls7a_writel(val, addr) *(volatile unsigned int *)TO_UNCACHE(addr) = (val)
 #define ls7a_writeq(val, addr) *(volatile unsigned long *)TO_UNCACHE(addr) = (val)

+void enable_gpe_wakeup(void);
+void enable_pci_wakeup(void);
+
#endif /* __ASM_LOONGSON_H */ #endif /* __ASM_LOONGSON_H */
@@ -11,7 +11,7 @@
 #define RELA_STACK_DEPTH 16

 struct mod_section {
-	Elf_Shdr *shdr;
+	int shndx;
 	int num_entries;
 	int max_entries;
 };
@@ -20,6 +20,9 @@ struct mod_arch_specific {
 	struct mod_section got;
 	struct mod_section plt;
 	struct mod_section plt_idx;
+
+	/* For CONFIG_DYNAMIC_FTRACE */
+	struct plt_entry *ftrace_trampolines;
 };

 struct got_entry {
@@ -37,8 +40,8 @@ struct plt_idx_entry {
 	Elf_Addr symbol_addr;
 };

-Elf_Addr module_emit_got_entry(struct module *mod, Elf_Addr val);
-Elf_Addr module_emit_plt_entry(struct module *mod, Elf_Addr val);
+Elf_Addr module_emit_got_entry(struct module *mod, Elf_Shdr *sechdrs, Elf_Addr val);
+Elf_Addr module_emit_plt_entry(struct module *mod, Elf_Shdr *sechdrs, Elf_Addr val);

 static inline struct got_entry emit_got_entry(Elf_Addr val)
 {
@@ -49,7 +52,7 @@ static inline struct plt_entry emit_plt_entry(unsigned long val)
 {
 	u32 lu12iw, lu32id, lu52id, jirl;

-	lu12iw = (lu12iw_op << 25 | (((val >> 12) & 0xfffff) << 5) | LOONGARCH_GPR_T1);
+	lu12iw = larch_insn_gen_lu12iw(LOONGARCH_GPR_T1, ADDR_IMM(val, LU12IW));
 	lu32id = larch_insn_gen_lu32id(LOONGARCH_GPR_T1, ADDR_IMM(val, LU32ID));
 	lu52id = larch_insn_gen_lu52id(LOONGARCH_GPR_T1, LOONGARCH_GPR_T1, ADDR_IMM(val, LU52ID));
 	jirl = larch_insn_gen_jirl(0, LOONGARCH_GPR_T1, 0, (val & 0xfff));
@@ -62,10 +65,10 @@ static inline struct plt_idx_entry emit_plt_idx_entry(unsigned long val)
 	return (struct plt_idx_entry) { val };
 }

-static inline int get_plt_idx(unsigned long val, const struct mod_section *sec)
+static inline int get_plt_idx(unsigned long val, Elf_Shdr *sechdrs, const struct mod_section *sec)
 {
 	int i;
-	struct plt_idx_entry *plt_idx = (struct plt_idx_entry *)sec->shdr->sh_addr;
+	struct plt_idx_entry *plt_idx = (struct plt_idx_entry *)sechdrs[sec->shndx].sh_addr;

 	for (i = 0; i < sec->num_entries; i++) {
 		if (plt_idx[i].symbol_addr == val)
@@ -76,11 +79,12 @@ static inline int get_plt_idx(unsigned long val, const struct mod_section *sec)
 }

 static inline struct plt_entry *get_plt_entry(unsigned long val,
-					      const struct mod_section *sec_plt,
-					      const struct mod_section *sec_plt_idx)
+					      Elf_Shdr *sechdrs,
+					      const struct mod_section *sec_plt,
+					      const struct mod_section *sec_plt_idx)
 {
-	int plt_idx = get_plt_idx(val, sec_plt_idx);
-	struct plt_entry *plt = (struct plt_entry *)sec_plt->shdr->sh_addr;
+	int plt_idx = get_plt_idx(val, sechdrs, sec_plt_idx);
+	struct plt_entry *plt = (struct plt_entry *)sechdrs[sec_plt->shndx].sh_addr;

 	if (plt_idx < 0)
 		return NULL;
@@ -89,10 +93,11 @@ static inline struct plt_entry *get_plt_entry(unsigned long val,
 }

 static inline struct got_entry *get_got_entry(Elf_Addr val,
+					      Elf_Shdr *sechdrs,
 					      const struct mod_section *sec)
 {
-	struct got_entry *got = (struct got_entry *)sec->shdr->sh_addr;
 	int i;
+	struct got_entry *got = (struct got_entry *)sechdrs[sec->shndx].sh_addr;

 	for (i = 0; i < sec->num_entries; i++)
 		if (got[i].symbol_addr == val)
......
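For orientation, the rewritten emit_plt_entry() above materializes a 64-bit target in four pieces via the ADDR_IMM() masks from <asm/inst.h>. A worked split for an illustrative address:

	/* val = 0x9000000012345678
	 * ADDR_IMM(val, LU12IW) = 0x12345  (bits 31..12, for lu12i.w)
	 * ADDR_IMM(val, LU32ID) = 0x00000  (bits 51..32, for lu32i.d)
	 * ADDR_IMM(val, LU52ID) = 0x900    (bits 63..52, for lu52i.d)
	 * val & 0xfff           = 0x678    (bits 11..0, folded into jirl)
	 */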
@@ -5,4 +5,5 @@ SECTIONS {
 	.got : { BYTE(0) }
 	.plt : { BYTE(0) }
 	.plt.idx : { BYTE(0) }
+	.ftrace_trampoline : { BYTE(0) }
 }
@@ -13,6 +13,7 @@
 extern unsigned long eentry;
 extern unsigned long tlbrentry;
+extern char init_command_line[COMMAND_LINE_SIZE];
 extern void tlb_init(int cpu);
 extern void cpu_cache_init(void);
 extern void cache_error_setup(void);
......
/* SPDX-License-Identifier: GPL-2.0 */
/*
* GCC stack protector support.
*
 * Stack protector works by putting a predefined pattern at the start of
 * the stack frame and verifying that it hasn't been overwritten when
 * returning from the function. The pattern is called the stack canary,
 * and on LoongArch gcc expects it to be defined by a global variable called
* "__stack_chk_guard".
*/
#ifndef _ASM_STACKPROTECTOR_H
#define _ASM_STACKPROTECTOR_H
#include <linux/random.h>
#include <linux/version.h>
extern unsigned long __stack_chk_guard;
/*
* Initialize the stackprotector canary value.
*
* NOTE: this must only be called from functions that never return,
* and it must always be inlined.
*/
static __always_inline void boot_init_stack_canary(void)
{
unsigned long canary;
/* Try to get a semi random initial value. */
get_random_bytes(&canary, sizeof(canary));
canary ^= LINUX_VERSION_CODE;
current->stack_canary = canary;
__stack_chk_guard = current->stack_canary;
}
#endif /* _ASM_STACKPROTECTOR_H */
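A conceptual sketch of what -fstack-protector makes the compiler emit around a protected function; the real prologue/epilogue is generated by the compiler, never written by hand:

	extern unsigned long __stack_chk_guard;
	extern void __stack_chk_fail(void);

	void example(void)
	{
		unsigned long canary = __stack_chk_guard;	/* prologue copy */
		char buf[64];

		/* ... body that might overflow buf into the canary slot ... */

		if (canary != __stack_chk_guard)
			__stack_chk_fail();	/* canary clobbered: abort */
	}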
@@ -5,8 +5,13 @@
 #ifndef _ASM_STRING_H
 #define _ASM_STRING_H

+#define __HAVE_ARCH_MEMSET
 extern void *memset(void *__s, int __c, size_t __count);
+
+#define __HAVE_ARCH_MEMCPY
 extern void *memcpy(void *__to, __const__ void *__from, size_t __n);
+
+#define __HAVE_ARCH_MEMMOVE
 extern void *memmove(void *__dest, __const__ void *__src, size_t __n);

 #endif /* _ASM_STRING_H */
@@ -38,7 +38,7 @@ struct thread_info {
 #define INIT_THREAD_INFO(tsk)			\
 {						\
 	.task		= &tsk,			\
-	.flags		= 0,			\
+	.flags		= _TIF_FIXADE,		\
 	.cpu		= 0,			\
 	.preempt_count	= INIT_PREEMPT_COUNT,	\
 }
......
@@ -12,6 +12,7 @@
 extern u64 cpu_clock_freq;
 extern u64 const_clock_freq;

+extern void save_counter(void);
 extern void sync_counter(void);

 static inline unsigned int calc_const_freq(void)
......
@@ -15,7 +15,8 @@
 #include <linux/string.h>
 #include <linux/extable.h>
 #include <asm/pgtable.h>
-#include <asm-generic/extable.h>
+#include <asm/extable.h>
+#include <asm/asm-extable.h>
 #include <asm-generic/access_ok.h>

 extern u64 __ua_limit;
@@ -160,16 +161,9 @@ do { \
 	__asm__ __volatile__( \
 	"1: " insn " %1, %2 \n" \
 	"2: \n" \
-	" .section .fixup,\"ax\" \n" \
-	"3: li.w %0, %3 \n" \
-	" move %1, $zero \n" \
-	" b 2b \n" \
-	" .previous \n" \
-	" .section __ex_table,\"a\" \n" \
-	" "__UA_ADDR "\t1b, 3b \n" \
-	" .previous \n" \
+	_ASM_EXTABLE_UACCESS_ERR_ZERO(1b, 2b, %0, %1) \
 	: "+r" (__gu_err), "=r" (__gu_tmp) \
-	: "m" (__m(ptr)), "i" (-EFAULT)); \
+	: "m" (__m(ptr))); \
 	\
 	(val) = (__typeof__(*(ptr))) __gu_tmp; \
 }
@@ -192,15 +186,9 @@ do { \
 	__asm__ __volatile__( \
 	"1: " insn " %z2, %1 # __put_user_asm\n" \
 	"2: \n" \
-	" .section .fixup,\"ax\" \n" \
-	"3: li.w %0, %3 \n" \
-	" b 2b \n" \
-	" .previous \n" \
-	" .section __ex_table,\"a\" \n" \
-	" " __UA_ADDR " 1b, 3b \n" \
-	" .previous \n" \
+	_ASM_EXTABLE_UACCESS_ERR(1b, 2b, %0) \
 	: "+r" (__pu_err), "=m" (__m(ptr)) \
-	: "Jr" (__pu_val), "i" (-EFAULT)); \
+	: "Jr" (__pu_val)); \
 }

 #define __get_kernel_nofault(dst, src, type, err_label) \
......
@@ -20,7 +20,8 @@ struct unwind_state {
 	char type; /* UNWINDER_XXX */
 	struct stack_info stack_info;
 	struct task_struct *task;
-	bool first, error;
+	bool first, error, is_ftrace;
+	int graph_idx;
 	unsigned long sp, pc, ra;
 };
......
@@ -7,13 +7,27 @@ extra-y := vmlinux.lds
 obj-y += head.o cpu-probe.o cacheinfo.o env.o setup.o entry.o genex.o \
 	 traps.o irq.o idle.o process.o dma.o mem.o io.o reset.o switch.o \
-	 elf.o syscall.o signal.o time.o topology.o inst.o ptrace.o vdso.o
+	 elf.o syscall.o signal.o time.o topology.o inst.o ptrace.o vdso.o \
+	 alternative.o unaligned.o

 obj-$(CONFIG_ACPI) += acpi.o
 obj-$(CONFIG_EFI) += efi.o

 obj-$(CONFIG_CPU_HAS_FPU) += fpu.o

+ifdef CONFIG_FUNCTION_TRACER
+  ifndef CONFIG_DYNAMIC_FTRACE
+    obj-y += mcount.o ftrace.o
+    CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE)
+  else
+    obj-y += mcount_dyn.o ftrace_dyn.o
+    CFLAGS_REMOVE_ftrace_dyn.o = $(CC_FLAGS_FTRACE)
+  endif
+  CFLAGS_REMOVE_inst.o = $(CC_FLAGS_FTRACE)
+  CFLAGS_REMOVE_time.o = $(CC_FLAGS_FTRACE)
+  CFLAGS_REMOVE_perf_event.o = $(CC_FLAGS_FTRACE)
+endif
+
 obj-$(CONFIG_MODULES) += module.o module-sections.o
 obj-$(CONFIG_STACKTRACE) += stacktrace.o
......
@@ -12,6 +12,7 @@
 #include <linux/irq.h>
 #include <linux/irqdomain.h>
 #include <linux/memblock.h>
+#include <linux/of_fdt.h>
 #include <linux/serial_core.h>
 #include <asm/io.h>
 #include <asm/numa.h>
@@ -139,20 +140,26 @@ static void __init acpi_process_madt(void)
 	loongson_sysconf.nr_cpus = num_processors;
 }

+#ifndef CONFIG_SUSPEND
+int (*acpi_suspend_lowlevel)(void);
+#else
+int (*acpi_suspend_lowlevel)(void) = loongarch_acpi_suspend;
+#endif
+
 void __init acpi_boot_table_init(void)
 {
 	/*
 	 * If acpi_disabled, bail out
 	 */
 	if (acpi_disabled)
-		return;
+		goto fdt_earlycon;

 	/*
 	 * Initialize the ACPI boot-time table parser.
 	 */
 	if (acpi_table_init()) {
 		disable_acpi();
-		return;
+		goto fdt_earlycon;
 	}

 	loongson_sysconf.boot_cpu_id = read_csr_cpuid();
@@ -164,6 +171,12 @@ void __init acpi_boot_table_init(void)

 	/* Do not enable ACPI SPCR console by default */
 	acpi_parse_spcr(earlycon_acpi_spcr_enable, false);
+
+	return;
+
+fdt_earlycon:
+	if (earlycon_acpi_spcr_enable)
+		early_init_dt_scan_chosen_stdout();
 }

 #ifdef CONFIG_ACPI_NUMA
......
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/mm.h>
#include <linux/module.h>
#include <asm/alternative.h>
#include <asm/cacheflush.h>
#include <asm/inst.h>
#include <asm/sections.h>
int __read_mostly alternatives_patched;
EXPORT_SYMBOL_GPL(alternatives_patched);
#define MAX_PATCH_SIZE (((u8)(-1)) / LOONGARCH_INSN_SIZE)
static int __initdata_or_module debug_alternative;
static int __init debug_alt(char *str)
{
debug_alternative = 1;
return 1;
}
__setup("debug-alternative", debug_alt);
#define DPRINTK(fmt, args...) \
do { \
if (debug_alternative) \
printk(KERN_DEBUG "%s: " fmt "\n", __func__, ##args); \
} while (0)
#define DUMP_WORDS(buf, count, fmt, args...) \
do { \
if (unlikely(debug_alternative)) { \
int _j; \
union loongarch_instruction *_buf = buf; \
\
if (!(count)) \
break; \
\
printk(KERN_DEBUG fmt, ##args); \
for (_j = 0; _j < count - 1; _j++) \
printk(KERN_CONT "<%08x> ", _buf[_j].word); \
printk(KERN_CONT "<%08x>\n", _buf[_j].word); \
} \
} while (0)
/* Use this to add nops to a buffer, then text_poke the whole buffer. */
static void __init_or_module add_nops(union loongarch_instruction *insn, int count)
{
while (count--) {
insn->word = INSN_NOP;
insn++;
}
}
/* Is the jump addr in local .altinstructions */
static inline bool in_alt_jump(unsigned long jump, void *start, void *end)
{
return jump >= (unsigned long)start && jump < (unsigned long)end;
}
static void __init_or_module recompute_jump(union loongarch_instruction *buf,
union loongarch_instruction *dest, union loongarch_instruction *src,
void *start, void *end)
{
unsigned int si, si_l, si_h;
unsigned long cur_pc, jump_addr, pc;
long offset;
cur_pc = (unsigned long)src;
pc = (unsigned long)dest;
si_l = src->reg0i26_format.immediate_l;
si_h = src->reg0i26_format.immediate_h;
switch (src->reg0i26_format.opcode) {
case b_op:
case bl_op:
jump_addr = cur_pc + sign_extend((si_h << 16 | si_l) << 2, 27);
if (in_alt_jump(jump_addr, start, end))
return;
offset = jump_addr - pc;
BUG_ON(offset < -SZ_128M || offset >= SZ_128M);
offset >>= 2;
buf->reg0i26_format.immediate_h = offset >> 16;
buf->reg0i26_format.immediate_l = offset;
return;
}
si_l = src->reg1i21_format.immediate_l;
si_h = src->reg1i21_format.immediate_h;
switch (src->reg1i21_format.opcode) {
case bceqz_op: /* bceqz_op = bcnez_op */
BUG_ON(buf->reg1i21_format.rj & BIT(4));
fallthrough;
case beqz_op:
case bnez_op:
jump_addr = cur_pc + sign_extend((si_h << 16 | si_l) << 2, 22);
if (in_alt_jump(jump_addr, start, end))
return;
offset = jump_addr - pc;
BUG_ON(offset < -SZ_4M || offset >= SZ_4M);
offset >>= 2;
buf->reg1i21_format.immediate_h = offset >> 16;
buf->reg1i21_format.immediate_l = offset;
return;
}
si = src->reg2i16_format.immediate;
switch (src->reg2i16_format.opcode) {
case beq_op:
case bne_op:
case blt_op:
case bge_op:
case bltu_op:
case bgeu_op:
jump_addr = cur_pc + sign_extend(si << 2, 17);
if (in_alt_jump(jump_addr, start, end))
return;
offset = jump_addr - pc;
BUG_ON(offset < -SZ_128K || offset >= SZ_128K);
offset >>= 2;
buf->reg2i16_format.immediate = offset;
return;
}
}
static int __init_or_module copy_alt_insns(union loongarch_instruction *buf,
union loongarch_instruction *dest, union loongarch_instruction *src, int nr)
{
int i;
for (i = 0; i < nr; i++) {
buf[i].word = src[i].word;
if (is_pc_ins(&src[i])) {
pr_err("Not support pcrel instruction at present!");
return -EINVAL;
}
if (is_branch_ins(&src[i]) &&
src[i].reg2i16_format.opcode != jirl_op) {
recompute_jump(&buf[i], &dest[i], &src[i], src, src + nr);
}
}
return 0;
}
/*
* text_poke_early - Update instructions on a live kernel at boot time
*
* When you use this code to patch more than one byte of an instruction
* you need to make sure that other CPUs cannot execute this code in parallel.
* Also no thread must be currently preempted in the middle of these
 * instructions. And on the local CPU you need to be protected against NMI or MCE
* handlers seeing an inconsistent instruction while you patch.
*/
static void *__init_or_module text_poke_early(union loongarch_instruction *insn,
union loongarch_instruction *buf, unsigned int nr)
{
int i;
unsigned long flags;
local_irq_save(flags);
for (i = 0; i < nr; i++)
insn[i].word = buf[i].word;
local_irq_restore(flags);
wbflush();
flush_icache_range((unsigned long)insn, (unsigned long)(insn + nr));
return insn;
}
/*
* Replace instructions with better alternatives for this CPU type. This runs
 * before SMP is initialized to avoid SMP problems with self-modifying code.
 * This implies that asymmetric systems where APs have fewer capabilities than
* the boot processor are not handled. Tough. Make sure you disable such
* features by hand.
*/
void __init_or_module apply_alternatives(struct alt_instr *start, struct alt_instr *end)
{
struct alt_instr *a;
unsigned int nr_instr, nr_repl, nr_insnbuf;
union loongarch_instruction *instr, *replacement;
union loongarch_instruction insnbuf[MAX_PATCH_SIZE];
DPRINTK("alt table %px, -> %px", start, end);
/*
* The scan order should be from start to end. A later scanned
* alternative code can overwrite previously scanned alternative code.
* Some kernel functions (e.g. memcpy, memset, etc) use this order to
* patch code.
*
* So be careful if you want to change the scan order to any other
* order.
*/
for (a = start; a < end; a++) {
nr_insnbuf = 0;
instr = (void *)&a->instr_offset + a->instr_offset;
replacement = (void *)&a->replace_offset + a->replace_offset;
BUG_ON(a->instrlen > sizeof(insnbuf));
BUG_ON(a->instrlen & 0x3);
BUG_ON(a->replacementlen & 0x3);
nr_instr = a->instrlen / LOONGARCH_INSN_SIZE;
nr_repl = a->replacementlen / LOONGARCH_INSN_SIZE;
if (!cpu_has(a->feature)) {
DPRINTK("feat not exist: %d, old: (%px len: %d), repl: (%px, len: %d)",
a->feature, instr, a->instrlen,
replacement, a->replacementlen);
continue;
}
DPRINTK("feat: %d, old: (%px len: %d), repl: (%px, len: %d)",
a->feature, instr, a->instrlen,
replacement, a->replacementlen);
DUMP_WORDS(instr, nr_instr, "%px: old_insn: ", instr);
DUMP_WORDS(replacement, nr_repl, "%px: rpl_insn: ", replacement);
copy_alt_insns(insnbuf, instr, replacement, nr_repl);
nr_insnbuf = nr_repl;
if (nr_instr > nr_repl) {
add_nops(insnbuf + nr_repl, nr_instr - nr_repl);
nr_insnbuf += nr_instr - nr_repl;
}
DUMP_WORDS(insnbuf, nr_insnbuf, "%px: final_insn: ", instr);
text_poke_early(instr, insnbuf, nr_insnbuf);
}
}
void __init alternative_instructions(void)
{
apply_alternatives(__alt_instructions, __alt_instructions_end);
alternatives_patched = 1;
}
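Worth noting for debugging: this file also wires up a `debug-alternative` kernel command-line parameter via __setup(), so booting with it makes the DPRINTK()/DUMP_WORDS() helpers print each patch site's old, replacement and final instruction words.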
@@ -68,6 +68,9 @@ void output_task_defines(void)
 	OFFSET(TASK_FLAGS, task_struct, flags);
 	OFFSET(TASK_MM, task_struct, mm);
 	OFFSET(TASK_PID, task_struct, pid);
+#if defined(CONFIG_STACKPROTECTOR)
+	OFFSET(TASK_STACK_CANARY, task_struct, stack_canary);
+#endif
 	DEFINE(TASK_STRUCT_SIZE, sizeof(struct task_struct));
 	BLANK();
 }
@@ -257,3 +260,15 @@ void output_smpboot_defines(void)
 	BLANK();
 }
 #endif
+
+#ifdef CONFIG_HIBERNATION
+void output_pbe_defines(void)
+{
+	COMMENT(" Linux struct pbe offsets. ");
+	OFFSET(PBE_ADDRESS, pbe, address);
+	OFFSET(PBE_ORIG_ADDRESS, pbe, orig_address);
+	OFFSET(PBE_NEXT, pbe, next);
+	DEFINE(PBE_SIZE, sizeof(struct pbe));
+	BLANK();
+}
+#endif
...@@ -28,16 +28,29 @@ static unsigned long efi_nr_tables; ...@@ -28,16 +28,29 @@ static unsigned long efi_nr_tables;
static unsigned long efi_config_table; static unsigned long efi_config_table;
static unsigned long __initdata boot_memmap = EFI_INVALID_TABLE_ADDR; static unsigned long __initdata boot_memmap = EFI_INVALID_TABLE_ADDR;
static unsigned long __initdata fdt_pointer = EFI_INVALID_TABLE_ADDR;
static efi_system_table_t *efi_systab; static efi_system_table_t *efi_systab;
static efi_config_table_type_t arch_tables[] __initdata = { static efi_config_table_type_t arch_tables[] __initdata = {
{LINUX_EFI_BOOT_MEMMAP_GUID, &boot_memmap, "MEMMAP" }, {LINUX_EFI_BOOT_MEMMAP_GUID, &boot_memmap, "MEMMAP" },
{DEVICE_TREE_GUID, &fdt_pointer, "FDTPTR" },
{}, {},
}; };
void __init *efi_fdt_pointer(void)
{
if (!efi_systab)
return NULL;
if (fdt_pointer == EFI_INVALID_TABLE_ADDR)
return NULL;
return early_memremap_ro(fdt_pointer, SZ_64K);
}
void __init efi_runtime_init(void)
{
if (!efi_enabled(EFI_BOOT) || !efi_systab->runtime)
return;
if (efi_runtime_disabled()) {
...
...@@ -11,6 +11,7 @@
#include <asm/early_ioremap.h>
#include <asm/bootinfo.h>
#include <asm/loongson.h>
#include <asm/setup.h>
u64 efi_system_table;
struct loongson_system_configuration loongson_sysconf;
...@@ -27,6 +28,7 @@ void __init init_environ(void)
clear_bit(EFI_BOOT, &efi.flags);
strscpy(boot_command_line, cmdline, COMMAND_LINE_SIZE);
strscpy(init_command_line, cmdline, COMMAND_LINE_SIZE);
early_memunmap(cmdline, COMMAND_LINE_SIZE);
efi_system_table = fw_arg2;
...
...@@ -8,6 +8,7 @@
*/
#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/asm-extable.h>
#include <asm/asm-offsets.h>
#include <asm/errno.h>
#include <asm/export.h>
...@@ -21,9 +22,7 @@
.macro EX insn, reg, src, offs
.ex\@: \insn \reg, \src, \offs
_asm_extable .ex\@, fault
.endm
.macro sc_save_fp base
...
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2022 Loongson Technology Corporation Limited
*/
#include <linux/init.h>
#include <linux/ftrace.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/cacheflush.h>
#include <asm/inst.h>
#include <asm/loongarch.h>
#include <asm/syscall.h>
#include <asm-generic/sections.h>
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * As the `_mcount` call follows the LoongArch psABI, the ra-save and
 * stack-allocation operations can be found before this insn.
 */
static int ftrace_get_parent_ra_addr(unsigned long insn_addr, int *ra_off)
{
int limit = 32;
union loongarch_instruction *insn;
insn = (union loongarch_instruction *)insn_addr;
do {
insn--;
limit--;
if (is_ra_save_ins(insn))
*ra_off = -((1 << 12) - insn->reg2i12_format.immediate);
} while (!is_stack_alloc_ins(insn) && limit);
if (!limit)
return -EINVAL;
return 0;
}
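/*
 * An illustrative (not from the source) shape of what the backward scan
 * above expects to find in the traced function:
 *
 *	addi.d	sp, sp, -16	# is_stack_alloc_ins(): the stop condition
 *	st.d	ra, sp, 8	# is_ra_save_ins(): records ra_off
 *	...
 *	bl	_mcount		# insn_addr: the scan walks back from here
 */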
void prepare_ftrace_return(unsigned long self_addr,
unsigned long callsite_sp, unsigned long old)
{
int ra_off;
unsigned long return_hooker = (unsigned long)&return_to_handler;
if (unlikely(ftrace_graph_is_dead()))
return;
if (unlikely(atomic_read(&current->tracing_graph_pause)))
return;
if (ftrace_get_parent_ra_addr(self_addr, &ra_off))
goto out;
if (!function_graph_enter(old, self_addr, 0, NULL))
*(unsigned long *)(callsite_sp + ra_off) = return_hooker;
return;
out:
ftrace_graph_stop();
WARN_ON(1);
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
// SPDX-License-Identifier: GPL-2.0
/*
* Based on arch/arm64/kernel/ftrace.c
*
* Copyright (C) 2022 Loongson Technology Corporation Limited
*/
#include <linux/ftrace.h>
#include <linux/uaccess.h>
#include <asm/inst.h>
#include <asm/module.h>
static int ftrace_modify_code(unsigned long pc, u32 old, u32 new, bool validate)
{
u32 replaced;
if (validate) {
if (larch_insn_read((void *)pc, &replaced))
return -EFAULT;
if (replaced != old)
return -EINVAL;
}
if (larch_insn_patch_text((void *)pc, new))
return -EPERM;
return 0;
}
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
#ifdef CONFIG_MODULES
static inline int __get_mod(struct module **mod, unsigned long addr)
{
preempt_disable();
*mod = __module_text_address(addr);
preempt_enable();
if (WARN_ON(!(*mod)))
return -EINVAL;
return 0;
}
static struct plt_entry *get_ftrace_plt(struct module *mod, unsigned long addr)
{
struct plt_entry *plt = mod->arch.ftrace_trampolines;
if (addr == FTRACE_ADDR)
return &plt[FTRACE_PLT_IDX];
if (addr == FTRACE_REGS_ADDR &&
IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
return &plt[FTRACE_REGS_PLT_IDX];
return NULL;
}
static unsigned long get_plt_addr(struct module *mod, unsigned long addr)
{
struct plt_entry *plt;
plt = get_ftrace_plt(mod, addr);
if (!plt) {
pr_err("ftrace: no module PLT for %ps\n", (void *)addr);
return -EINVAL;
}
return (unsigned long)plt;
}
#endif
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, unsigned long addr)
{
u32 old, new;
unsigned long pc;
long offset __maybe_unused;
pc = rec->ip + LOONGARCH_INSN_SIZE;
#ifdef CONFIG_MODULES
offset = (long)pc - (long)addr;
if (offset < -SZ_128M || offset >= SZ_128M) {
int ret;
struct module *mod;
ret = __get_mod(&mod, pc);
if (ret)
return ret;
addr = get_plt_addr(mod, addr);
old_addr = get_plt_addr(mod, old_addr);
}
#endif
new = larch_insn_gen_bl(pc, addr);
old = larch_insn_gen_bl(pc, old_addr);
return ftrace_modify_code(pc, old, new, true);
}
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
int ftrace_update_ftrace_func(ftrace_func_t func)
{
u32 new;
unsigned long pc;
pc = (unsigned long)&ftrace_call;
new = larch_insn_gen_bl(pc, (unsigned long)func);
return ftrace_modify_code(pc, 0, new, false);
}
/*
* The compiler has inserted 2 NOPs before the regular function prologue.
* T series registers are available and safe because of LoongArch's psABI.
*
* At runtime, we can replace the nop with a bl to enable the ftrace call,
* and replace the bl with a nop to disable it. The bl requires us to save
* the original RA value, so it is saved in t0 here.
*
* Details are:
*
* | Compiled | Disabled | Enabled |
* +------------+------------------------+------------------------+
* | nop | move t0, ra | move t0, ra |
* | nop | nop | bl ftrace_caller |
* | func_body | func_body | func_body |
*
* The RA value will be recovered by ftrace_regs_entry, and restored into RA
* before returning to the regular function prologue. When a function is not
* being traced, the "move t0, ra" is not harmful.
*/
int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
{
u32 old, new;
unsigned long pc;
pc = rec->ip;
old = larch_insn_gen_nop();
new = larch_insn_gen_move(LOONGARCH_GPR_T0, LOONGARCH_GPR_RA);
return ftrace_modify_code(pc, old, new, true);
}
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
u32 old, new;
unsigned long pc;
long offset __maybe_unused;
pc = rec->ip + LOONGARCH_INSN_SIZE;
#ifdef CONFIG_MODULES
offset = (long)pc - (long)addr;
if (offset < -SZ_128M || offset >= SZ_128M) {
int ret;
struct module *mod;
ret = __get_mod(&mod, pc);
if (ret)
return ret;
addr = get_plt_addr(mod, addr);
}
#endif
old = larch_insn_gen_nop();
new = larch_insn_gen_bl(pc, addr);
return ftrace_modify_code(pc, old, new, true);
}
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr)
{
u32 old, new;
unsigned long pc;
long offset __maybe_unused;
pc = rec->ip + LOONGARCH_INSN_SIZE;
#ifdef CONFIG_MODULES
offset = (long)pc - (long)addr;
if (offset < -SZ_128M || offset >= SZ_128M) {
int ret;
struct module *mod;
ret = __get_mod(&mod, pc);
if (ret)
return ret;
addr = get_plt_addr(mod, addr);
}
#endif
new = larch_insn_gen_nop();
old = larch_insn_gen_bl(pc, addr);
return ftrace_modify_code(pc, old, new, true);
}
void arch_ftrace_update_code(int command)
{
command |= FTRACE_MAY_SLEEP;
ftrace_modify_all_code(command);
}
int __init ftrace_dyn_arch_init(void)
{
return 0;
}
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent)
{
unsigned long old;
unsigned long return_hooker = (unsigned long)&return_to_handler;
if (unlikely(atomic_read(&current->tracing_graph_pause)))
return;
old = *parent;
if (!function_graph_enter(old, self_addr, 0, parent))
*parent = return_hooker;
}
#ifdef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *op, struct ftrace_regs *fregs)
{
struct pt_regs *regs = &fregs->regs;
unsigned long *parent = (unsigned long *)&regs->regs[1];
prepare_ftrace_return(ip, (unsigned long *)parent);
}
#else
static int ftrace_modify_graph_caller(bool enable)
{
u32 branch, nop;
unsigned long pc, func;
extern void ftrace_graph_call(void);
pc = (unsigned long)&ftrace_graph_call;
func = (unsigned long)&ftrace_graph_caller;
nop = larch_insn_gen_nop();
branch = larch_insn_gen_b(pc, func);
if (enable)
return ftrace_modify_code(pc, nop, branch, true);
else
return ftrace_modify_code(pc, branch, nop, true);
}
int ftrace_enable_ftrace_graph_caller(void)
{
return ftrace_modify_graph_caller(true);
}
int ftrace_disable_ftrace_graph_caller(void)
{
return ftrace_modify_graph_caller(false);
}
#endif /* CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
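With the dynamic tracer wired up above, the generic tracefs interface drives
it. A minimal userspace sketch (assuming tracefs is mounted at
/sys/kernel/tracing; nothing here is LoongArch-specific):

#include <stdio.h>

int main(void)
{
	/* Selecting the function tracer makes the core ftrace code call
	 * ftrace_make_call() above to patch the per-function nops. */
	FILE *f = fopen("/sys/kernel/tracing/current_tracer", "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fputs("function", f);
	fclose(f);
	return 0;
}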
...@@ -2,8 +2,135 @@
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#include <linux/sizes.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/inst.h>
static DEFINE_RAW_SPINLOCK(patch_lock);
int larch_insn_read(void *addr, u32 *insnp)
{
int ret;
u32 val;
ret = copy_from_kernel_nofault(&val, addr, LOONGARCH_INSN_SIZE);
if (!ret)
*insnp = val;
return ret;
}
int larch_insn_write(void *addr, u32 insn)
{
int ret;
unsigned long flags = 0;
raw_spin_lock_irqsave(&patch_lock, flags);
ret = copy_to_kernel_nofault(addr, &insn, LOONGARCH_INSN_SIZE);
raw_spin_unlock_irqrestore(&patch_lock, flags);
return ret;
}
int larch_insn_patch_text(void *addr, u32 insn)
{
int ret;
u32 *tp = addr;
if ((unsigned long)tp & 3)
return -EINVAL;
ret = larch_insn_write(tp, insn);
if (!ret)
flush_icache_range((unsigned long)tp,
(unsigned long)tp + LOONGARCH_INSN_SIZE);
return ret;
}
u32 larch_insn_gen_nop(void)
{
return INSN_NOP;
}
u32 larch_insn_gen_b(unsigned long pc, unsigned long dest)
{
long offset = dest - pc;
unsigned int immediate_l, immediate_h;
union loongarch_instruction insn;
if ((offset & 3) || offset < -SZ_128M || offset >= SZ_128M) {
pr_warn("The generated b instruction is out of range.\n");
return INSN_BREAK;
}
offset >>= 2;
immediate_l = offset & 0xffff;
offset >>= 16;
immediate_h = offset & 0x3ff;
insn.reg0i26_format.opcode = b_op;
insn.reg0i26_format.immediate_l = immediate_l;
insn.reg0i26_format.immediate_h = immediate_h;
return insn.word;
}
u32 larch_insn_gen_bl(unsigned long pc, unsigned long dest)
{
long offset = dest - pc;
unsigned int immediate_l, immediate_h;
union loongarch_instruction insn;
if ((offset & 3) || offset < -SZ_128M || offset >= SZ_128M) {
pr_warn("The generated bl instruction is out of range.\n");
return INSN_BREAK;
}
offset >>= 2;
immediate_l = offset & 0xffff;
offset >>= 16;
immediate_h = offset & 0x3ff;
insn.reg0i26_format.opcode = bl_op;
insn.reg0i26_format.immediate_l = immediate_l;
insn.reg0i26_format.immediate_h = immediate_h;
return insn.word;
}
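The field split above is plain arithmetic. A standalone sketch with one worked
value (it mirrors the computation above; illustrative, not kernel API):

#include <stdio.h>

/* Split a byte offset into the 26-bit b/bl immediate fields. */
static void split_offs26(long offset, unsigned int *immediate_l, unsigned int *immediate_h)
{
	offset >>= 2;			/* instructions are 4-byte aligned */
	*immediate_l = offset & 0xffff;
	offset >>= 16;
	*immediate_h = offset & 0x3ff;
}

int main(void)
{
	unsigned int lo, hi;

	/* dest - pc = +0x2000 bytes -> word offset 0x800 */
	split_offs26(0x2000, &lo, &hi);
	printf("immediate_l=0x%x immediate_h=0x%x\n", lo, hi);	/* 0x800, 0x0 */
	return 0;
}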
u32 larch_insn_gen_or(enum loongarch_gpr rd, enum loongarch_gpr rj, enum loongarch_gpr rk)
{
union loongarch_instruction insn;
insn.reg3_format.opcode = or_op;
insn.reg3_format.rd = rd;
insn.reg3_format.rj = rj;
insn.reg3_format.rk = rk;
return insn.word;
}
u32 larch_insn_gen_move(enum loongarch_gpr rd, enum loongarch_gpr rj)
{
return larch_insn_gen_or(rd, rj, 0);
}
u32 larch_insn_gen_lu12iw(enum loongarch_gpr rd, int imm)
{
union loongarch_instruction insn;
insn.reg1i20_format.opcode = lu12iw_op;
insn.reg1i20_format.rd = rd;
insn.reg1i20_format.immediate = imm;
return insn.word;
}
u32 larch_insn_gen_lu32id(enum loongarch_gpr rd, int imm)
{
union loongarch_instruction insn;
...
/* SPDX-License-Identifier: GPL-2.0 */
/*
* LoongArch specific _mcount support
*
* Copyright (C) 2022 Loongson Technology Corporation Limited
*/
#include <asm/export.h>
#include <asm/ftrace.h>
#include <asm/regdef.h>
#include <asm/stackframe.h>
.text
#define MCOUNT_S0_OFFSET (0)
#define MCOUNT_RA_OFFSET (SZREG)
#define MCOUNT_STACK_SIZE (2 * SZREG)
.macro MCOUNT_SAVE_REGS
PTR_ADDI sp, sp, -MCOUNT_STACK_SIZE
PTR_S s0, sp, MCOUNT_S0_OFFSET
PTR_S ra, sp, MCOUNT_RA_OFFSET
move s0, a0
.endm
.macro MCOUNT_RESTORE_REGS
move a0, s0
PTR_L ra, sp, MCOUNT_RA_OFFSET
PTR_L s0, sp, MCOUNT_S0_OFFSET
PTR_ADDI sp, sp, MCOUNT_STACK_SIZE
.endm
SYM_FUNC_START(_mcount)
la.pcrel t1, ftrace_stub
la.pcrel t2, ftrace_trace_function /* Prepare t2 for (1) */
PTR_L t2, t2, 0
beq t1, t2, fgraph_trace
MCOUNT_SAVE_REGS
move a0, ra /* arg0: self return address */
move a1, s0 /* arg1: parent's return address */
jirl ra, t2, 0 /* (1) call *ftrace_trace_function */
MCOUNT_RESTORE_REGS
fgraph_trace:
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
la.pcrel t1, ftrace_stub
la.pcrel t3, ftrace_graph_return
PTR_L t3, t3, 0
bne t1, t3, ftrace_graph_caller
la.pcrel t1, ftrace_graph_entry_stub
la.pcrel t3, ftrace_graph_entry
PTR_L t3, t3, 0
bne t1, t3, ftrace_graph_caller
#endif
SYM_INNER_LABEL(ftrace_stub, SYM_L_GLOBAL)
jr ra
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
SYM_INNER_LABEL(ftrace_graph_func, SYM_L_GLOBAL)
bl ftrace_stub
#endif
SYM_FUNC_END(_mcount)
EXPORT_SYMBOL(_mcount)
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
SYM_FUNC_START(ftrace_graph_caller)
MCOUNT_SAVE_REGS
PTR_ADDI a0, ra, -4 /* arg0: Callsite self return addr */
PTR_ADDI a1, sp, MCOUNT_STACK_SIZE /* arg1: Callsite sp */
move a2, s0 /* arg2: Callsite parent ra */
bl prepare_ftrace_return
MCOUNT_RESTORE_REGS
jr ra
SYM_FUNC_END(ftrace_graph_caller)
SYM_FUNC_START(return_to_handler)
PTR_ADDI sp, sp, -2 * SZREG
PTR_S a0, sp, 0
PTR_S a1, sp, SZREG
bl ftrace_return_to_handler
/* Restore the real parent address: a0 -> ra */
move ra, a0
PTR_L a0, sp, 0
PTR_L a1, sp, SZREG
PTR_ADDI sp, sp, 2 * SZREG
jr ra
SYM_FUNC_END(return_to_handler)
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2022 Loongson Technology Corporation Limited
*/
#include <asm/export.h>
#include <asm/ftrace.h>
#include <asm/regdef.h>
#include <asm/stackframe.h>
.text
/*
 * Due to -fpatchable-function-entry=2: the compiler inserted 2 NOPs before the
 * regular C function prologue. When the PC arrives here, the last 2 instructions
 * are as follows:
 * move t0, ra
 * bl callsite (for modules, callsite is a trampoline)
 *
 * The module trampoline is as follows:
 * lu12i.w t1, callsite[31:12]
 * lu32i.d t1, callsite[51:32]
 * lu52i.d t1, t1, callsite[63:52]
 * jirl zero, t1, callsite[11:0] >> 2
 *
 * See arch/loongarch/kernel/ftrace_dyn.c for details. Note that the T series
 * regs are available and safe here because each C function follows the
 * LoongArch psABI as well.
 */
.macro ftrace_regs_entry allregs=0
PTR_ADDI sp, sp, -PT_SIZE
PTR_S t0, sp, PT_R1 /* Save parent ra at PT_R1(RA) */
PTR_S a0, sp, PT_R4
PTR_S a1, sp, PT_R5
PTR_S a2, sp, PT_R6
PTR_S a3, sp, PT_R7
PTR_S a4, sp, PT_R8
PTR_S a5, sp, PT_R9
PTR_S a6, sp, PT_R10
PTR_S a7, sp, PT_R11
PTR_S fp, sp, PT_R22
.if \allregs
PTR_S tp, sp, PT_R2
PTR_S t0, sp, PT_R12
PTR_S t1, sp, PT_R13
PTR_S t2, sp, PT_R14
PTR_S t3, sp, PT_R15
PTR_S t4, sp, PT_R16
PTR_S t5, sp, PT_R17
PTR_S t6, sp, PT_R18
PTR_S t7, sp, PT_R19
PTR_S t8, sp, PT_R20
PTR_S u0, sp, PT_R21
PTR_S s0, sp, PT_R23
PTR_S s1, sp, PT_R24
PTR_S s2, sp, PT_R25
PTR_S s3, sp, PT_R26
PTR_S s4, sp, PT_R27
PTR_S s5, sp, PT_R28
PTR_S s6, sp, PT_R29
PTR_S s7, sp, PT_R30
PTR_S s8, sp, PT_R31
/* Clear PT_R0; it is sometimes used as a flag later. */
PTR_S zero, sp, PT_R0
.endif
PTR_S ra, sp, PT_ERA /* Save trace function ra at PT_ERA */
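/* Record the original sp (the value before this PT_SIZE frame) at PT_R3 */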
PTR_ADDI t8, sp, PT_SIZE
PTR_S t8, sp, PT_R3
.endm
SYM_FUNC_START(ftrace_stub)
jr ra
SYM_FUNC_END(ftrace_stub)
SYM_CODE_START(ftrace_common)
PTR_ADDI a0, ra, -8 /* arg0: ip */
move a1, t0 /* arg1: parent_ip */
la.pcrel t1, function_trace_op
PTR_L a2, t1, 0 /* arg2: op */
move a3, sp /* arg3: regs */
SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL)
bl ftrace_stub
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
SYM_INNER_LABEL(ftrace_graph_call, SYM_L_GLOBAL)
nop /* b ftrace_graph_caller */
#endif
/*
 * As we didn't use S series regs in this assembly code and all calls
 * are C functions which save S series regs by themselves, there is
 * no need to restore them. The T series regs are available and safe
 * at the callsite, so there is no need to restore them either.
 */
ftrace_common_return:
PTR_L ra, sp, PT_R1
PTR_L a0, sp, PT_R4
PTR_L a1, sp, PT_R5
PTR_L a2, sp, PT_R6
PTR_L a3, sp, PT_R7
PTR_L a4, sp, PT_R8
PTR_L a5, sp, PT_R9
PTR_L a6, sp, PT_R10
PTR_L a7, sp, PT_R11
PTR_L fp, sp, PT_R22
PTR_L t0, sp, PT_ERA
PTR_ADDI sp, sp, PT_SIZE
jr t0
SYM_CODE_END(ftrace_common)
SYM_CODE_START(ftrace_caller)
ftrace_regs_entry allregs=0
b ftrace_common
SYM_CODE_END(ftrace_caller)
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
SYM_CODE_START(ftrace_regs_caller)
ftrace_regs_entry allregs=1
b ftrace_common
SYM_CODE_END(ftrace_regs_caller)
#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
SYM_CODE_START(ftrace_graph_caller)
PTR_L a0, sp, PT_ERA
PTR_ADDI a0, a0, -8 /* arg0: self_addr */
PTR_ADDI a1, sp, PT_R1 /* arg1: parent */
bl prepare_ftrace_return
b ftrace_common_return
SYM_CODE_END(ftrace_graph_caller)
SYM_CODE_START(return_to_handler)
/* Save return value regs */
PTR_ADDI sp, sp, -2 * SZREG
PTR_S a0, sp, 0
PTR_S a1, sp, SZREG
move a0, zero
bl ftrace_return_to_handler
move ra, a0
/* Restore return value regs */
PTR_L a0, sp, 0
PTR_L a1, sp, SZREG
PTR_ADDI sp, sp, 2 * SZREG
jr ra
SYM_CODE_END(return_to_handler)
#endif
...@@ -6,18 +6,19 @@
#include <linux/elf.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ftrace.h>
Elf_Addr module_emit_got_entry(struct module *mod, Elf_Shdr *sechdrs, Elf_Addr val)
{
struct mod_section *got_sec = &mod->arch.got;
int i = got_sec->num_entries;
struct got_entry *got = get_got_entry(val, sechdrs, got_sec);
if (got)
return (Elf_Addr)got;
/* There is no GOT entry for val yet, create a new one. */
got = (struct got_entry *)sechdrs[got_sec->shndx].sh_addr;
got[i] = emit_got_entry(val);
got_sec->num_entries++;
...@@ -33,12 +34,12 @@ Elf_Addr module_emit_got_entry(struct module *mod, Elf_Addr val)
return (Elf_Addr)&got[i];
}
Elf_Addr module_emit_plt_entry(struct module *mod, Elf_Shdr *sechdrs, Elf_Addr val)
{
int nr;
struct mod_section *plt_sec = &mod->arch.plt;
struct mod_section *plt_idx_sec = &mod->arch.plt_idx;
struct plt_entry *plt = get_plt_entry(val, sechdrs, plt_sec, plt_idx_sec);
struct plt_idx_entry *plt_idx;
if (plt)
...@@ -47,9 +48,9 @@ Elf_Addr module_emit_plt_entry(struct module *mod, Elf_Addr val)
nr = plt_sec->num_entries;
/* There is no duplicate entry, create a new one */
plt = (struct plt_entry *)sechdrs[plt_sec->shndx].sh_addr;
plt[nr] = emit_plt_entry(val);
plt_idx = (struct plt_idx_entry *)sechdrs[plt_idx_sec->shndx].sh_addr;
plt_idx[nr] = emit_plt_idx_entry(val);
plt_sec->num_entries++;
...@@ -103,28 +104,31 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
char *secstrings, struct module *mod)
{
unsigned int i, num_plts = 0, num_gots = 0;
Elf_Shdr *got_sec, *plt_sec, *plt_idx_sec, *tramp = NULL;
/*
 * Find the empty .plt sections.
 */
for (i = 0; i < ehdr->e_shnum; i++) {
if (!strcmp(secstrings + sechdrs[i].sh_name, ".got"))
mod->arch.got.shndx = i;
else if (!strcmp(secstrings + sechdrs[i].sh_name, ".plt"))
mod->arch.plt.shndx = i;
else if (!strcmp(secstrings + sechdrs[i].sh_name, ".plt.idx"))
mod->arch.plt_idx.shndx = i;
else if (!strcmp(secstrings + sechdrs[i].sh_name, ".ftrace_trampoline"))
tramp = sechdrs + i;
}
if (!mod->arch.got.shndx) {
pr_err("%s: module GOT section(s) missing\n", mod->name);
return -ENOEXEC;
}
if (!mod->arch.plt.shndx) {
pr_err("%s: module PLT section(s) missing\n", mod->name);
return -ENOEXEC;
}
if (!mod->arch.plt_idx.shndx) {
pr_err("%s: module PLT.IDX section(s) missing\n", mod->name);
return -ENOEXEC;
}
...@@ -145,26 +149,36 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
count_max_entries(relas, num_rela, &num_plts, &num_gots);
}
got_sec = sechdrs + mod->arch.got.shndx;
got_sec->sh_type = SHT_NOBITS;
got_sec->sh_flags = SHF_ALLOC;
got_sec->sh_addralign = L1_CACHE_BYTES;
got_sec->sh_size = (num_gots + 1) * sizeof(struct got_entry);
mod->arch.got.num_entries = 0;
mod->arch.got.max_entries = num_gots;
plt_sec = sechdrs + mod->arch.plt.shndx;
plt_sec->sh_type = SHT_NOBITS;
plt_sec->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
plt_sec->sh_addralign = L1_CACHE_BYTES;
plt_sec->sh_size = (num_plts + 1) * sizeof(struct plt_entry);
mod->arch.plt.num_entries = 0;
mod->arch.plt.max_entries = num_plts;
plt_idx_sec = sechdrs + mod->arch.plt_idx.shndx;
plt_idx_sec->sh_type = SHT_NOBITS;
plt_idx_sec->sh_flags = SHF_ALLOC;
plt_idx_sec->sh_addralign = L1_CACHE_BYTES;
plt_idx_sec->sh_size = (num_plts + 1) * sizeof(struct plt_idx_entry);
mod->arch.plt_idx.num_entries = 0;
mod->arch.plt_idx.max_entries = num_plts;
if (tramp) {
tramp->sh_type = SHT_NOBITS;
tramp->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
tramp->sh_addralign = __alignof__(struct plt_entry);
tramp->sh_size = NR_FTRACE_PLTS * sizeof(struct plt_entry);
}
return 0;
}
...@@ -15,8 +15,11 @@
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/ftrace.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <asm/alternative.h>
#include <asm/inst.h>
static int rela_stack_push(s64 stack_value, s64 *rela_stack, size_t *rela_stack_top)
{
...@@ -98,16 +101,17 @@ static int apply_r_larch_sop_push_dup(struct module *mod, u32 *location, Elf_Add
return 0;
}
static int apply_r_larch_sop_push_plt_pcrel(struct module *mod,
Elf_Shdr *sechdrs, u32 *location, Elf_Addr v,
s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
{
ptrdiff_t offset = (void *)v - (void *)location;
if (offset >= SZ_128M)
v = module_emit_plt_entry(mod, sechdrs, v);
if (offset < -SZ_128M)
v = module_emit_plt_entry(mod, sechdrs, v);
return apply_r_larch_sop_push_pcrel(mod, location, v, rela_stack, rela_stack_top, type);
}
...@@ -271,17 +275,18 @@ static int apply_r_larch_add_sub(struct module *mod, u32 *location, Elf_Addr v,
}
}
static int apply_r_larch_b26(struct module *mod,
Elf_Shdr *sechdrs, u32 *location, Elf_Addr v,
s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
{
ptrdiff_t offset = (void *)v - (void *)location;
union loongarch_instruction *insn = (union loongarch_instruction *)location;
if (offset >= SZ_128M)
v = module_emit_plt_entry(mod, sechdrs, v);
if (offset < -SZ_128M)
v = module_emit_plt_entry(mod, sechdrs, v);
offset = (void *)v - (void *)location;
...@@ -338,10 +343,11 @@ static int apply_r_larch_pcala(struct module *mod, u32 *location, Elf_Addr v,
return 0;
}
static int apply_r_larch_got_pc(struct module *mod,
Elf_Shdr *sechdrs, u32 *location, Elf_Addr v,
s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
{
Elf_Addr got = module_emit_got_entry(mod, sechdrs, v);
if (!got)
return -EINVAL;
...@@ -386,13 +392,10 @@ static reloc_rela_handler reloc_rela_handlers[] = {
[R_LARCH_SOP_PUSH_PCREL] = apply_r_larch_sop_push_pcrel,
[R_LARCH_SOP_PUSH_ABSOLUTE] = apply_r_larch_sop_push_absolute,
[R_LARCH_SOP_PUSH_DUP] = apply_r_larch_sop_push_dup,
[R_LARCH_SOP_SUB ... R_LARCH_SOP_IF_ELSE] = apply_r_larch_sop,
[R_LARCH_SOP_POP_32_S_10_5 ... R_LARCH_SOP_POP_32_U] = apply_r_larch_sop_imm_field,
[R_LARCH_ADD32 ... R_LARCH_SUB64] = apply_r_larch_add_sub,
[R_LARCH_PCALA_HI20...R_LARCH_PCALA64_HI12] = apply_r_larch_pcala,
};
int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
...@@ -443,7 +446,22 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
sym->st_value, rel[i].r_addend, (u64)location);
v = sym->st_value + rel[i].r_addend;
switch (type) {
case R_LARCH_B26:
err = apply_r_larch_b26(mod, sechdrs, location,
v, rela_stack, &rela_stack_top, type);
break;
case R_LARCH_GOT_PC_HI20...R_LARCH_GOT_PC_LO12:
err = apply_r_larch_got_pc(mod, sechdrs, location,
v, rela_stack, &rela_stack_top, type);
break;
case R_LARCH_SOP_PUSH_PLT_PCREL:
err = apply_r_larch_sop_push_plt_pcrel(mod, sechdrs, location,
v, rela_stack, &rela_stack_top, type);
break;
default:
err = handler(mod, location, v, rela_stack, &rela_stack_top, type);
}
if (err)
return err;
}
...@@ -456,3 +474,36 @@ void *module_alloc(unsigned long size)
return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
GFP_KERNEL, PAGE_KERNEL, 0, NUMA_NO_NODE, __builtin_return_address(0));
}
static void module_init_ftrace_plt(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs, struct module *mod)
{
#ifdef CONFIG_DYNAMIC_FTRACE
struct plt_entry *ftrace_plts;
ftrace_plts = (void *)sechdrs->sh_addr;
ftrace_plts[FTRACE_PLT_IDX] = emit_plt_entry(FTRACE_ADDR);
if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
ftrace_plts[FTRACE_REGS_PLT_IDX] = emit_plt_entry(FTRACE_REGS_ADDR);
mod->arch.ftrace_trampolines = ftrace_plts;
#endif
}
int module_finalize(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs, struct module *mod)
{
const Elf_Shdr *s, *se;
const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) {
if (!strcmp(".altinstructions", secstrs + s->sh_name))
apply_alternatives((void *)s->sh_addr, (void *)s->sh_addr + s->sh_size);
if (!strcmp(".ftrace_trampoline", secstrs + s->sh_name))
module_init_ftrace_plt(hdr, s, mod);
}
return 0;
}
...@@ -388,6 +388,21 @@ static void __init numa_default_distance(void)
}
}
/*
* fake_numa_init() - For Non-ACPI systems
* Return: 0 on success, -errno on failure.
*/
static int __init fake_numa_init(void)
{
phys_addr_t start = memblock_start_of_DRAM();
phys_addr_t end = memblock_end_of_DRAM() - 1;
node_set(0, numa_nodes_parsed);
pr_info("Faking a node at [mem %pap-%pap]\n", &start, &end);
return numa_add_memblk(0, start, end + 1);
}
int __init init_numa_memory(void)
{
int i;
...@@ -404,7 +419,7 @@ int __init init_numa_memory(void)
memset(&numa_meminfo, 0, sizeof(numa_meminfo));
/* Parse SRAT and SLIT if provided by firmware. */
ret = acpi_disabled ? fake_numa_init() : acpi_numa_init();
if (ret < 0)
return ret;
...
...@@ -47,6 +47,12 @@
#include <asm/unwind.h>
#include <asm/vdso.h>
#ifdef CONFIG_STACKPROTECTOR
#include <linux/stackprotector.h>
unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
#endif
/*
* Idle related variables and functions
*/
...
...@@ -15,6 +15,7 @@
#include <acpi/reboot.h>
#include <asm/idle.h>
#include <asm/loongarch.h>
#include <asm/loongson.h>
void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);
...@@ -41,6 +42,10 @@ void machine_power_off(void)
#ifdef CONFIG_SMP
preempt_disable();
smp_send_stop();
#endif
#ifdef CONFIG_PM
if (!acpi_disabled)
enable_pci_wakeup();
#endif
do_kernel_power_off();
#ifdef CONFIG_EFI
...
...@@ -28,10 +28,16 @@
#include <linux/sizes.h>
#include <linux/device.h>
#include <linux/dma-map-ops.h>
#include <linux/libfdt.h>
#include <linux/of_fdt.h>
#include <linux/of_address.h>
#include <linux/suspend.h>
#include <linux/swiotlb.h>
#include <asm/addrspace.h>
#include <asm/alternative.h>
#include <asm/bootinfo.h>
#include <asm/bugs.h>
#include <asm/cache.h>
#include <asm/cpu.h>
#include <asm/dma.h>
...@@ -67,6 +73,7 @@ static const char dmi_empty_string[] = " ";
*
* These are initialized so they are in the .data section
*/
char init_command_line[COMMAND_LINE_SIZE] __initdata;
static int num_standard_resources;
static struct resource *standard_resources;
...@@ -80,6 +87,11 @@ const char *get_system_type(void)
return "generic-loongson-machine";
}
void __init check_bugs(void)
{
alternative_instructions();
}
static const char *dmi_string_parse(const struct dmi_header *dm, u8 s)
{
const u8 *bp = ((u8 *) dm) + dm->length;
...@@ -246,6 +258,58 @@ static void __init arch_parse_crashkernel(void)
#endif
}
static void __init fdt_setup(void)
{
#ifdef CONFIG_OF_EARLY_FLATTREE
void *fdt_pointer;
/* ACPI-based systems do not require parsing fdt */
if (acpi_os_get_root_pointer())
return;
/* Look for a device tree configuration table entry */
fdt_pointer = efi_fdt_pointer();
if (!fdt_pointer || fdt_check_header(fdt_pointer))
return;
early_init_dt_scan(fdt_pointer);
early_init_fdt_reserve_self();
max_low_pfn = PFN_PHYS(memblock_end_of_DRAM());
#endif
}
static void __init bootcmdline_init(char **cmdline_p)
{
/*
* If CONFIG_CMDLINE_FORCE is enabled then initializing the command line
* is trivial - we simply use the built-in command line unconditionally &
* unmodified.
*/
if (IS_ENABLED(CONFIG_CMDLINE_FORCE)) {
strscpy(boot_command_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
goto out;
}
#ifdef CONFIG_OF_FLATTREE
/*
* If CONFIG_CMDLINE_BOOTLOADER is enabled and we are in FDT-based system,
* the boot_command_line will be overwritten by early_init_dt_scan_chosen().
* So we need to append init_command_line (the original copy of boot_command_line)
* to boot_command_line.
*/
if (initial_boot_params) {
if (boot_command_line[0])
strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
strlcat(boot_command_line, init_command_line, COMMAND_LINE_SIZE);
}
#endif
out:
*cmdline_p = boot_command_line;
}
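/*
 * A worked example (illustrative values, not from the source): if the
 * bootloader passed "console=ttyS0,115200" (preserved in init_command_line)
 * and the DT /chosen node supplies "root=/dev/nvme0n1p2", then
 * early_init_dt_scan_chosen() leaves boot_command_line as
 * "root=/dev/nvme0n1p2" and the strlcat() calls above turn it into
 * "root=/dev/nvme0n1p2 console=ttyS0,115200".
 */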
void __init platform_init(void)
{
arch_reserve_vmcore();
...@@ -258,6 +322,7 @@ void __init platform_init(void)
acpi_gbl_use_default_register_widths = false;
acpi_boot_table_init();
#endif
unflatten_and_copy_device_tree();
#ifdef CONFIG_NUMA
init_numa_memory();
...@@ -290,6 +355,8 @@ static void __init arch_mem_init(char **cmdline_p)
check_kernel_sections_mem();
early_init_fdt_scan_reserved_mem();
/*
* In order to reduce the possibility of kernel panic when we fail to
* get IO TLB memory under CONFIG_SWIOTLB, it is better to allocate
...@@ -304,6 +371,10 @@ static void __init arch_mem_init(char **cmdline_p)
dma_contiguous_reserve(PFN_PHYS(max_low_pfn));
/* Reserve for hibernation. */
register_nosave_region(PFN_DOWN(__pa_symbol(&__nosave_begin)),
PFN_UP(__pa_symbol(&__nosave_end)));
memblock_dump_all();
early_memtest(PFN_PHYS(ARCH_PFN_OFFSET), PFN_PHYS(max_low_pfn));
...@@ -363,6 +434,81 @@ static void __init resource_init(void)
#endif
}
static int __init add_legacy_isa_io(struct fwnode_handle *fwnode,
resource_size_t hw_start, resource_size_t size)
{
int ret = 0;
unsigned long vaddr;
struct logic_pio_hwaddr *range;
range = kzalloc(sizeof(*range), GFP_ATOMIC);
if (!range)
return -ENOMEM;
range->fwnode = fwnode;
range->size = size = round_up(size, PAGE_SIZE);
range->hw_start = hw_start;
range->flags = LOGIC_PIO_CPU_MMIO;
ret = logic_pio_register_range(range);
if (ret) {
kfree(range);
return ret;
}
/* Legacy ISA must be placed at the start of PCI_IOBASE */
if (range->io_start != 0) {
logic_pio_unregister_range(range);
kfree(range);
return -EINVAL;
}
vaddr = (unsigned long)(PCI_IOBASE + range->io_start);
ioremap_page_range(vaddr, vaddr + size, hw_start, pgprot_device(PAGE_KERNEL));
return 0;
}
static __init int arch_reserve_pio_range(void)
{
struct device_node *np;
for_each_node_by_name(np, "isa") {
struct of_range range;
struct of_range_parser parser;
pr_info("ISA Bridge: %pOF\n", np);
if (of_range_parser_init(&parser, np)) {
pr_info("Failed to parse resources.\n");
of_node_put(np);
break;
}
for_each_of_range(&parser, &range) {
switch (range.flags & IORESOURCE_TYPE_BITS) {
case IORESOURCE_IO:
pr_info(" IO 0x%016llx..0x%016llx -> 0x%016llx\n",
range.cpu_addr,
range.cpu_addr + range.size - 1,
range.bus_addr);
if (add_legacy_isa_io(&np->fwnode, range.cpu_addr, range.size))
pr_warn("Failed to reserve legacy IO in Logic PIO\n");
break;
case IORESOURCE_MEM:
pr_info(" MEM 0x%016llx..0x%016llx -> 0x%016llx\n",
range.cpu_addr,
range.cpu_addr + range.size - 1,
range.bus_addr);
break;
}
}
}
return 0;
}
arch_initcall(arch_reserve_pio_range);
static int __init reserve_memblock_reserved_regions(void)
{
u64 i, j;
...@@ -415,12 +561,13 @@ static void __init prefill_possible_map(void)
void __init setup_arch(char **cmdline_p)
{
cpu_probe();
init_environ();
efi_init();
fdt_setup();
memblock_init();
pagetable_init();
bootcmdline_init(cmdline_p);
parse_early_param();
reserve_initrd_mem();
...
...@@ -16,6 +16,7 @@
#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/export.h>
#include <linux/syscore_ops.h>
#include <linux/time.h>
#include <linux/tracepoint.h>
#include <linux/sched/hotplug.h>
...@@ -180,8 +181,42 @@ irqreturn_t loongson_ipi_interrupt(int irq, void *dev)
return IRQ_HANDLED;
}
static void __init fdt_smp_setup(void)
{
#ifdef CONFIG_OF
unsigned int cpu, cpuid;
struct device_node *node = NULL;
for_each_of_cpu_node(node) {
if (!of_device_is_available(node))
continue;
cpuid = of_get_cpu_hwid(node, 0);
if (cpuid >= nr_cpu_ids)
continue;
if (cpuid == loongson_sysconf.boot_cpu_id) {
cpu = 0;
numa_add_cpu(cpu);
} else {
cpu = cpumask_next_zero(-1, cpu_present_mask);
}
num_processors++;
set_cpu_possible(cpu, true);
set_cpu_present(cpu, true);
__cpu_number_map[cpuid] = cpu;
__cpu_logical_map[cpu] = cpuid;
}
loongson_sysconf.nr_cpus = num_processors;
#endif
}
void __init loongson_smp_setup(void)
{
fdt_smp_setup();
cpu_data[0].core = cpu_logical_map(0) % loongson_sysconf.cores_per_package;
cpu_data[0].package = cpu_logical_map(0) / loongson_sysconf.cores_per_package;
...
...@@ -23,6 +23,11 @@ SYM_FUNC_START(__switch_to)
stptr.d ra, a0, THREAD_REG01
stptr.d a3, a0, THREAD_SCHED_RA
stptr.d a4, a0, THREAD_SCHED_CFA
#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP)
la t7, __stack_chk_guard
LONG_L t8, a1, TASK_STACK_CANARY
LONG_S t8, t7, 0
#endif
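/*
 * Note: the global __stack_chk_guard can only track the current task's
 * canary when a single CPU is running; with SMP other CPUs run other
 * tasks concurrently, so the copy above is compiled out in that case.
 */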
move tp, a2
cpu_restore_nonscratch a1
...
...@@ -115,12 +115,17 @@ static unsigned long __init get_loops_per_jiffy(void)
return lpj;
}
static long init_offset __nosavedata;
void save_counter(void)
{
init_offset = drdtime();
}
void sync_counter(void)
{
/* Ensure the counter begins at 0 */
csr_write64(init_offset, LOONGARCH_CSR_CNTC);
}
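/*
 * A hedged sketch of the arithmetic (assuming rdtime returns the raw
 * counter plus CSR.CNTC): at boot, init_offset = -(current reading), so
 * writing it to CNTC makes the effective counter start at 0. Around
 * suspend, save_counter() re-captures the reading and sync_counter()
 * re-applies it on resume; __nosavedata keeps init_offset from being
 * clobbered by a hibernation image restore.
 */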
static int get_timer_irq(void)
...@@ -219,7 +224,7 @@ void __init time_init(void)
else
const_clock_freq = calc_const_freq();
init_offset = -(drdtime() - csr_read64(LOONGARCH_CSR_CNTC));
constant_clockevent_init();
constant_clocksource_init();
...
...@@ -368,13 +368,40 @@ asmlinkage void noinstr do_ade(struct pt_regs *regs)
irqentry_exit(regs, state);
}
/* sysctl hooks */
int unaligned_enabled __read_mostly = 1; /* Enabled by default */
int no_unaligned_warning __read_mostly = 1; /* Only 1 warning by default */
asmlinkage void noinstr do_ale(struct pt_regs *regs)
{
unsigned int *pc;
irqentry_state_t state = irqentry_enter(regs);
perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, regs->csr_badvaddr);
/*
* Did we catch a fault trying to load an instruction?
*/
if (regs->csr_badvaddr == regs->csr_era)
goto sigbus;
if (user_mode(regs) && !test_thread_flag(TIF_FIXADE))
goto sigbus;
if (!unaligned_enabled)
goto sigbus;
if (!no_unaligned_warning)
show_registers(regs);
pc = (unsigned int *)exception_era(regs);
emulate_load_store_insn(regs, (void __user *)regs->csr_badvaddr, pc);
goto out;
sigbus:
die_if_kernel("Kernel ale access", regs); die_if_kernel("Kernel ale access", regs);
force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)regs->csr_badvaddr); force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)regs->csr_badvaddr);
out:
irqentry_exit(regs, state); irqentry_exit(regs, state);
} }
...
// SPDX-License-Identifier: GPL-2.0
/*
* Handle unaligned accesses by emulation.
*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*
* Derived from MIPS:
* Copyright (C) 1996, 1998, 1999, 2002 by Ralf Baechle
* Copyright (C) 1999 Silicon Graphics, Inc.
* Copyright (C) 2014 Imagination Technologies Ltd.
*/
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/debugfs.h>
#include <linux/perf_event.h>
#include <asm/asm.h>
#include <asm/branch.h>
#include <asm/fpu.h>
#include <asm/inst.h>
#include "access-helper.h"
#ifdef CONFIG_DEBUG_FS
static u32 unaligned_instructions_user;
static u32 unaligned_instructions_kernel;
#endif
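/*
 * The 32-way switches in read_fpr()/write_fpr() below look redundant, but
 * the FP register in movfr2gr.d/movgr2fr.d must be spelled literally in
 * the instruction text, so it cannot be indexed at run time; each case
 * instantiates the inline asm with a constant register name.
 */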
static inline unsigned long read_fpr(unsigned int idx)
{
#define READ_FPR(idx, __value) \
__asm__ __volatile__("movfr2gr.d %0, $f"#idx"\n\t" : "=r"(__value));
unsigned long __value;
switch (idx) {
case 0:
READ_FPR(0, __value);
break;
case 1:
READ_FPR(1, __value);
break;
case 2:
READ_FPR(2, __value);
break;
case 3:
READ_FPR(3, __value);
break;
case 4:
READ_FPR(4, __value);
break;
case 5:
READ_FPR(5, __value);
break;
case 6:
READ_FPR(6, __value);
break;
case 7:
READ_FPR(7, __value);
break;
case 8:
READ_FPR(8, __value);
break;
case 9:
READ_FPR(9, __value);
break;
case 10:
READ_FPR(10, __value);
break;
case 11:
READ_FPR(11, __value);
break;
case 12:
READ_FPR(12, __value);
break;
case 13:
READ_FPR(13, __value);
break;
case 14:
READ_FPR(14, __value);
break;
case 15:
READ_FPR(15, __value);
break;
case 16:
READ_FPR(16, __value);
break;
case 17:
READ_FPR(17, __value);
break;
case 18:
READ_FPR(18, __value);
break;
case 19:
READ_FPR(19, __value);
break;
case 20:
READ_FPR(20, __value);
break;
case 21:
READ_FPR(21, __value);
break;
case 22:
READ_FPR(22, __value);
break;
case 23:
READ_FPR(23, __value);
break;
case 24:
READ_FPR(24, __value);
break;
case 25:
READ_FPR(25, __value);
break;
case 26:
READ_FPR(26, __value);
break;
case 27:
READ_FPR(27, __value);
break;
case 28:
READ_FPR(28, __value);
break;
case 29:
READ_FPR(29, __value);
break;
case 30:
READ_FPR(30, __value);
break;
case 31:
READ_FPR(31, __value);
break;
default:
panic("unexpected idx '%d'", idx);
}
#undef READ_FPR
return __value;
}
static inline void write_fpr(unsigned int idx, unsigned long value)
{
#define WRITE_FPR(idx, value) \
__asm__ __volatile__("movgr2fr.d $f"#idx", %0\n\t" :: "r"(value));
switch (idx) {
case 0:
WRITE_FPR(0, value);
break;
case 1:
WRITE_FPR(1, value);
break;
case 2:
WRITE_FPR(2, value);
break;
case 3:
WRITE_FPR(3, value);
break;
case 4:
WRITE_FPR(4, value);
break;
case 5:
WRITE_FPR(5, value);
break;
case 6:
WRITE_FPR(6, value);
break;
case 7:
WRITE_FPR(7, value);
break;
case 8:
WRITE_FPR(8, value);
break;
case 9:
WRITE_FPR(9, value);
break;
case 10:
WRITE_FPR(10, value);
break;
case 11:
WRITE_FPR(11, value);
break;
case 12:
WRITE_FPR(12, value);
break;
case 13:
WRITE_FPR(13, value);
break;
case 14:
WRITE_FPR(14, value);
break;
case 15:
WRITE_FPR(15, value);
break;
case 16:
WRITE_FPR(16, value);
break;
case 17:
WRITE_FPR(17, value);
break;
case 18:
WRITE_FPR(18, value);
break;
case 19:
WRITE_FPR(19, value);
break;
case 20:
WRITE_FPR(20, value);
break;
case 21:
WRITE_FPR(21, value);
break;
case 22:
WRITE_FPR(22, value);
break;
case 23:
WRITE_FPR(23, value);
break;
case 24:
WRITE_FPR(24, value);
break;
case 25:
WRITE_FPR(25, value);
break;
case 26:
WRITE_FPR(26, value);
break;
case 27:
WRITE_FPR(27, value);
break;
case 28:
WRITE_FPR(28, value);
break;
case 29:
WRITE_FPR(29, value);
break;
case 30:
WRITE_FPR(30, value);
break;
case 31:
WRITE_FPR(31, value);
break;
default:
panic("unexpected idx '%d'", idx);
}
#undef WRITE_FPR
}
void emulate_load_store_insn(struct pt_regs *regs, void __user *addr, unsigned int *pc)
{
bool fp = false;
bool sign, write;
bool user = user_mode(regs);
unsigned int res, size = 0;
unsigned long value = 0;
union loongarch_instruction insn;
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
__get_inst(&insn.word, pc, user);
switch (insn.reg2i12_format.opcode) {
case ldh_op:
size = 2;
sign = true;
write = false;
break;
case ldhu_op:
size = 2;
sign = false;
write = false;
break;
case sth_op:
size = 2;
sign = true;
write = true;
break;
case ldw_op:
size = 4;
sign = true;
write = false;
break;
case ldwu_op:
size = 4;
sign = false;
write = false;
break;
case stw_op:
size = 4;
sign = true;
write = true;
break;
case ldd_op:
size = 8;
sign = true;
write = false;
break;
case std_op:
size = 8;
sign = true;
write = true;
break;
case flds_op:
size = 4;
fp = true;
sign = true;
write = false;
break;
case fsts_op:
size = 4;
fp = true;
sign = true;
write = true;
break;
case fldd_op:
size = 8;
fp = true;
sign = true;
write = false;
break;
case fstd_op:
size = 8;
fp = true;
sign = true;
write = true;
break;
}
switch (insn.reg2i14_format.opcode) {
case ldptrw_op:
size = 4;
sign = true;
write = false;
break;
case stptrw_op:
size = 4;
sign = true;
write = true;
break;
case ldptrd_op:
size = 8;
sign = true;
write = false;
break;
case stptrd_op:
size = 8;
sign = true;
write = true;
break;
}
switch (insn.reg3_format.opcode) {
case ldxh_op:
size = 2;
sign = true;
write = false;
break;
case ldxhu_op:
size = 2;
sign = false;
write = false;
break;
case stxh_op:
size = 2;
sign = true;
write = true;
break;
case ldxw_op:
size = 4;
sign = true;
write = false;
break;
case ldxwu_op:
size = 4;
sign = false;
write = false;
break;
case stxw_op:
size = 4;
sign = true;
write = true;
break;
case ldxd_op:
size = 8;
sign = true;
write = false;
break;
case stxd_op:
size = 8;
sign = true;
write = true;
break;
case fldxs_op:
size = 4;
fp = true;
sign = true;
write = false;
break;
case fstxs_op:
size = 4;
fp = true;
sign = true;
write = true;
break;
case fldxd_op:
size = 8;
fp = true;
sign = true;
write = false;
break;
case fstxd_op:
size = 8;
fp = true;
sign = true;
write = true;
break;
}
if (!size)
goto sigbus;
if (user && !access_ok(addr, size))
goto sigbus;
if (!write) {
res = unaligned_read(addr, &value, size, sign);
if (res)
goto fault;
/* Rd is the same field in all formats */
if (!fp)
regs->regs[insn.reg3_format.rd] = value;
else {
if (is_fpu_owner())
write_fpr(insn.reg3_format.rd, value);
else
set_fpr64(&current->thread.fpu.fpr[insn.reg3_format.rd], 0, value);
}
} else {
/* Rd is the same field in all formats */
if (!fp)
value = regs->regs[insn.reg3_format.rd];
else {
if (is_fpu_owner())
value = read_fpr(insn.reg3_format.rd);
else
value = get_fpr64(&current->thread.fpu.fpr[insn.reg3_format.rd], 0);
}
res = unaligned_write(addr, value, size);
if (res)
goto fault;
}
#ifdef CONFIG_DEBUG_FS
if (user)
unaligned_instructions_user++;
else
unaligned_instructions_kernel++;
#endif
compute_return_era(regs);
return;
fault:
/* Did we have an exception handler installed? */
if (fixup_exception(regs))
return;
die_if_kernel("Unhandled kernel unaligned access", regs);
force_sig(SIGSEGV);
return;
sigbus:
die_if_kernel("Unhandled kernel unaligned access", regs);
force_sig(SIGBUS);
return;
}
#ifdef CONFIG_DEBUG_FS
static int __init debugfs_unaligned(void)
{
struct dentry *d;
d = debugfs_create_dir("loongarch", NULL);
if (!d)
return -ENOMEM;
debugfs_create_u32("unaligned_instructions_user",
S_IRUGO, d, &unaligned_instructions_user);
debugfs_create_u32("unaligned_instructions_kernel",
S_IRUGO, d, &unaligned_instructions_kernel);
return 0;
}
arch_initcall(debugfs_unaligned);
#endif
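A minimal userspace probe of this emulation path (a sketch under two
assumptions: the CPU lacks hardware unaligned access so do_ale() is actually
taken, and debugfs is mounted at /sys/kernel/debug):

#include <stdio.h>

int main(void)
{
	char buf[16] = { 0 };
	volatile long v;

	/* A misaligned 8-byte load: compilers normally emit a plain ld.d
	 * here, which traps and is fixed up by emulate_load_store_insn(). */
	v = *(volatile long *)(buf + 1);
	(void)v;

	/* /sys/kernel/debug/loongarch/unaligned_instructions_user should
	 * now read one higher than before this run. */
	return 0;
}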
...@@ -3,6 +3,7 @@
* Copyright (C) 2022 Loongson Technology Corporation Limited
*/
#include <linux/kernel.h>
#include <linux/ftrace.h>
#include <asm/unwind.h>
...@@ -53,7 +54,8 @@ bool unwind_next_frame(struct unwind_state *state)
state->sp < info->end;
state->sp += sizeof(unsigned long)) {
addr = *(unsigned long *)(state->sp);
state->pc = ftrace_graph_ret_addr(state->task, &state->graph_idx,
addr, (unsigned long *)(state->sp - GRAPH_FAKE_OFFSET));
if (__kernel_text_address(addr))
return true;
}
...
...@@ -2,12 +2,23 @@
/*
* Copyright (C) 2022 Loongson Technology Corporation Limited
*/
#include <linux/ftrace.h>
#include <linux/kallsyms.h>
#include <asm/inst.h>
#include <asm/ptrace.h>
#include <asm/unwind.h>
static inline void unwind_state_fixup(struct unwind_state *state)
{
#ifdef CONFIG_DYNAMIC_FTRACE
static unsigned long ftrace = (unsigned long)ftrace_call + 4;
if (state->pc == ftrace)
state->is_ftrace = true;
#endif
}
unsigned long unwind_get_return_address(struct unwind_state *state)
{
...@@ -32,6 +43,8 @@ static bool unwind_by_guess(struct unwind_state *state)
state->sp < info->end;
state->sp += sizeof(unsigned long)) {
addr = *(unsigned long *)(state->sp);
state->pc = ftrace_graph_ret_addr(state->task, &state->graph_idx,
addr, (unsigned long *)(state->sp - GRAPH_FAKE_OFFSET));
if (__kernel_text_address(addr))
return true;
}
...
static bool unwind_by_prologue(struct unwind_state *state) static bool unwind_by_prologue(struct unwind_state *state)
{ {
struct stack_info *info = &state->stack_info;
union loongarch_instruction *ip, *ip_end;
long frame_ra = -1; long frame_ra = -1;
unsigned long frame_size = 0; unsigned long frame_size = 0;
unsigned long size, offset, pc = state->pc; unsigned long size, offset, pc = state->pc;
struct pt_regs *regs;
struct stack_info *info = &state->stack_info;
union loongarch_instruction *ip, *ip_end;
if (state->sp >= info->end || state->sp < info->begin) if (state->sp >= info->end || state->sp < info->begin)
return false; return false;
if (state->is_ftrace) {
/*
* When we meet ftrace_regs_entry, reset the first flag as if tracing had
* just started. Prologue analysis will stop soon because the PC is at the entry.
*/
regs = (struct pt_regs *)state->sp;
state->first = true;
state->is_ftrace = false;
state->pc = regs->csr_era;
state->ra = regs->regs[1];
state->sp = regs->regs[3];
return true;
}
if (!kallsyms_lookup_size_offset(pc, &size, &offset))
return false;
...@@ -95,7 +123,7 @@ static bool unwind_by_prologue(struct unwind_state *state)
state->pc = *(unsigned long *)(state->sp + frame_ra);
state->sp = state->sp + frame_size;
goto out;
first:
state->first = false;
...@@ -104,7 +132,9 @@ static bool unwind_by_prologue(struct unwind_state *state)
state->pc = state->ra;
out:
unwind_state_fixup(state);
return !!__kernel_text_address(state->pc);
}
void unwind_start(struct unwind_state *state, struct task_struct *task,
...@@ -147,8 +177,11 @@ bool unwind_next_frame(struct unwind_state *state)
break;
case UNWINDER_PROLOGUE:
if (unwind_by_prologue(state)) {
state->pc = ftrace_graph_ret_addr(state->task, &state->graph_idx,
state->pc, (unsigned long *)(state->sp - GRAPH_FAKE_OFFSET));
return true; return true;
}
if (info->type == STACK_TYPE_IRQ && if (info->type == STACK_TYPE_IRQ &&
info->end == state->sp) { info->end == state->sp) {
...@@ -158,10 +191,11 @@ bool unwind_next_frame(struct unwind_state *state) ...@@ -158,10 +191,11 @@ bool unwind_next_frame(struct unwind_state *state)
if (user_mode(regs) || !__kernel_text_address(pc)) if (user_mode(regs) || !__kernel_text_address(pc))
return false; return false;
state->pc = pc;
state->sp = regs->regs[3];
state->ra = regs->regs[1];
state->first = true; state->first = true;
state->ra = regs->regs[1];
state->sp = regs->regs[3];
state->pc = ftrace_graph_ret_addr(state->task, &state->graph_idx,
pc, (unsigned long *)(state->sp - GRAPH_FAKE_OFFSET));
get_stack_info(state->sp, state->task, info); get_stack_info(state->sp, state->task, info);
return true; return true;
......
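The unwinder changes above make the prologue unwinder ftrace-aware: when a frame's PC matches the dynamic-ftrace call site, the saved pt_regs of ftrace_regs_entry are decoded instead of the prologue, and ftrace_graph_ret_addr() rewrites the fake graph-tracer return address back to the real caller. For orientation, a minimal sketch of how a consumer drives this API, modelled on the kernel's own show_backtrace()-style loops (the function name and the printing are illustrative, not part of this patch):

static void dump_task_stack(struct task_struct *task, struct pt_regs *regs)
{
	struct unwind_state state;
	unsigned long addr;

	for (unwind_start(&state, task, regs);
	     !unwind_done(&state); unwind_next_frame(&state)) {
		addr = unwind_get_return_address(&state);
		if (!addr)
			break;
		pr_cont(" %pS", (void *)addr);	/* symbolized frame */
	}
	pr_cont("\n");
}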
@@ -4,6 +4,7 @@
 #include <asm/thread_info.h>

 #define PAGE_SIZE _PAGE_SIZE
+#define RO_EXCEPTION_TABLE_ALIGN 4

 /*
  * Put .bss..swapper_pg_dir as the first thing in .bss. This will
@@ -53,7 +54,17 @@ SECTIONS
	. = ALIGN(PECOFF_SEGMENT_ALIGN);
	_etext = .;

-	EXCEPTION_TABLE(16)
+	/*
+	 * struct alt_inst entries. From the header (alternative.h):
+	 * "Alternative instructions for different CPU types or capabilities"
+	 * Think locking instructions on spinlocks.
+	 */
+	. = ALIGN(4);
+	.altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) {
+		__alt_instructions = .;
+		*(.altinstructions)
+		__alt_instructions_end = .;
+	}

	.got : ALIGN(16) { *(.got) }
	.plt : ALIGN(16) { *(.plt) }
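The .altinstructions section collects the records that the ALTERNATIVE macro emits at each patch site; at boot, the runtime-patching code walks __alt_instructions..__alt_instructions_end and overwrites the original instructions when the CPU advertises the feature. A sketch of that walk is below; the struct layout is modelled on the x86/LoongArch alternative machinery, and cpu_has_feature()/text_poke_sketch() are hypothetical helper names, not literal kernel APIs:

struct alt_instr {
	s32 instr_offset;	/* original instructions, relative to this field */
	s32 replace_offset;	/* replacement instructions, relative */
	u16 feature;		/* e.g. CPU_FEATURE_UAL */
	u8  instrlen;		/* length of the original sequence */
	u8  replacementlen;	/* length of the replacement sequence */
};

extern struct alt_instr __alt_instructions[], __alt_instructions_end[];

static void apply_alternatives_sketch(void)
{
	struct alt_instr *a;

	for (a = __alt_instructions; a < __alt_instructions_end; a++) {
		void *instr = (void *)&a->instr_offset + a->instr_offset;
		void *repl  = (void *)&a->replace_offset + a->replace_offset;

		if (!cpu_has_feature(a->feature))	/* hypothetical helper */
			continue;
		/* copy the replacement over the original, pad with NOPs */
		text_poke_sketch(instr, repl, a->replacementlen, a->instrlen);
	}
}

Storing self-relative s32 offsets instead of absolute pointers keeps the records small and position-independent, the same design choice the relative exception table below makes.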
@@ -3,4 +3,5 @@
 # Makefile for LoongArch-specific library files.
 #

-lib-y	+= delay.o clear_user.o copy_user.o dump_tlb.o
+lib-y	+= delay.o memset.o memcpy.o memmove.o \
+	   clear_user.o copy_user.o dump_tlb.o unaligned.o
@@ -3,30 +3,37 @@
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */

+#include <asm/alternative-asm.h>
 #include <asm/asm.h>
 #include <asm/asmmacro.h>
+#include <asm/asm-extable.h>
+#include <asm/cpu.h>
 #include <asm/export.h>
 #include <asm/regdef.h>

-.macro fixup_ex from, to, offset, fix
-	.if \fix
-		.section .fixup, "ax"
-\to:		addi.d	a0, a1, \offset
-		jr	ra
-		.previous
-	.endif
-	.section __ex_table, "a"
-	PTR	\from\()b, \to\()b
-	.previous
-.endm
+.irp to, 0, 1, 2, 3, 4, 5, 6, 7
+.L_fixup_handle_\to\():
+	addi.d	a0, a1, (\to) * (-8)
+	jr	ra
+.endr
+
+SYM_FUNC_START(__clear_user)
+	/*
+	 * Some CPUs support hardware unaligned access
+	 */
+	ALTERNATIVE	"b __clear_user_generic",	\
+			"b __clear_user_fast", CPU_FEATURE_UAL
+SYM_FUNC_END(__clear_user)
+
+EXPORT_SYMBOL(__clear_user)

 /*
- * unsigned long __clear_user(void *addr, size_t size)
+ * unsigned long __clear_user_generic(void *addr, size_t size)
 *
 * a0: addr
 * a1: size
 */
-SYM_FUNC_START(__clear_user)
+SYM_FUNC_START(__clear_user_generic)
	beqz	a1, 2f

1:	st.b	zero, a0, 0
@@ -37,7 +44,55 @@ SYM_FUNC_START(__clear_user)
2:	move	a0, a1
	jr	ra

-	fixup_ex 1, 3, 0, 1
-SYM_FUNC_END(__clear_user)
+	_asm_extable 1b, .L_fixup_handle_0
+SYM_FUNC_END(__clear_user_generic)

-EXPORT_SYMBOL(__clear_user)

/*
* unsigned long __clear_user_fast(void *addr, unsigned long size)
*
* a0: addr
* a1: size
*/
SYM_FUNC_START(__clear_user_fast)
beqz a1, 10f
ori a2, zero, 64
blt a1, a2, 9f
/* set 64 bytes at a time */
1: st.d zero, a0, 0
2: st.d zero, a0, 8
3: st.d zero, a0, 16
4: st.d zero, a0, 24
5: st.d zero, a0, 32
6: st.d zero, a0, 40
7: st.d zero, a0, 48
8: st.d zero, a0, 56
addi.d a0, a0, 64
addi.d a1, a1, -64
bge a1, a2, 1b
beqz a1, 10f
/* set the remaining bytes */
9: st.b zero, a0, 0
addi.d a0, a0, 1
addi.d a1, a1, -1
bgt a1, zero, 9b
/* return */
10: move a0, a1
jr ra
/* fixup and ex_table */
_asm_extable 1b, .L_fixup_handle_0
_asm_extable 2b, .L_fixup_handle_1
_asm_extable 3b, .L_fixup_handle_2
_asm_extable 4b, .L_fixup_handle_3
_asm_extable 5b, .L_fixup_handle_4
_asm_extable 6b, .L_fixup_handle_5
_asm_extable 7b, .L_fixup_handle_6
_asm_extable 8b, .L_fixup_handle_7
_asm_extable 9b, .L_fixup_handle_0
SYM_FUNC_END(__clear_user_fast)
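The .L_fixup_handle_<k> labels encode how far the faulting store got: if the (k+1)-th st.d of a 64-byte block faults, k*8 bytes were already cleared, so the handler returns the bytes still uncleared. A one-line C model of the asm above (an illustration, not kernel code):

static unsigned long fixup_handle(unsigned long remaining, int k)
{
	return remaining - 8 * k;	/* asm: addi.d a0, a1, (\to) * (-8) */
}

This is why each st.d in the block has its own extable entry pointing at a different handler index.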
@@ -3,31 +3,38 @@
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */

+#include <asm/alternative-asm.h>
 #include <asm/asm.h>
 #include <asm/asmmacro.h>
+#include <asm/asm-extable.h>
+#include <asm/cpu.h>
 #include <asm/export.h>
 #include <asm/regdef.h>

-.macro fixup_ex from, to, offset, fix
-	.if \fix
-		.section .fixup, "ax"
-\to:		addi.d	a0, a2, \offset
-		jr	ra
-		.previous
-	.endif
-	.section __ex_table, "a"
-	PTR	\from\()b, \to\()b
-	.previous
-.endm
+.irp to, 0, 1, 2, 3, 4, 5, 6, 7
+.L_fixup_handle_\to\():
+	addi.d	a0, a2, (\to) * (-8)
+	jr	ra
+.endr
+
+SYM_FUNC_START(__copy_user)
+	/*
+	 * Some CPUs support hardware unaligned access
+	 */
+	ALTERNATIVE	"b __copy_user_generic",	\
+			"b __copy_user_fast", CPU_FEATURE_UAL
+SYM_FUNC_END(__copy_user)
+
+EXPORT_SYMBOL(__copy_user)

 /*
- * unsigned long __copy_user(void *to, const void *from, size_t n)
+ * unsigned long __copy_user_generic(void *to, const void *from, size_t n)
 *
 * a0: to
 * a1: from
 * a2: n
 */
-SYM_FUNC_START(__copy_user)
+SYM_FUNC_START(__copy_user_generic)
	beqz	a2, 3f

1:	ld.b	t0, a1, 0
@@ -40,8 +47,77 @@ SYM_FUNC_START(__copy_user)
3:	move	a0, a2
	jr	ra

-	fixup_ex 1, 4, 0, 1
-	fixup_ex 2, 4, 0, 0
-SYM_FUNC_END(__copy_user)
+	_asm_extable 1b, .L_fixup_handle_0
+	_asm_extable 2b, .L_fixup_handle_0
+SYM_FUNC_END(__copy_user_generic)

-EXPORT_SYMBOL(__copy_user)

/*
* unsigned long __copy_user_fast(void *to, const void *from, unsigned long n)
*
* a0: to
* a1: from
* a2: n
*/
SYM_FUNC_START(__copy_user_fast)
beqz a2, 19f
ori a3, zero, 64
blt a2, a3, 17f
/* copy 64 bytes at a time */
1: ld.d t0, a1, 0
2: ld.d t1, a1, 8
3: ld.d t2, a1, 16
4: ld.d t3, a1, 24
5: ld.d t4, a1, 32
6: ld.d t5, a1, 40
7: ld.d t6, a1, 48
8: ld.d t7, a1, 56
9: st.d t0, a0, 0
10: st.d t1, a0, 8
11: st.d t2, a0, 16
12: st.d t3, a0, 24
13: st.d t4, a0, 32
14: st.d t5, a0, 40
15: st.d t6, a0, 48
16: st.d t7, a0, 56
addi.d a0, a0, 64
addi.d a1, a1, 64
addi.d a2, a2, -64
bge a2, a3, 1b
beqz a2, 19f
/* copy the remaining bytes */
17: ld.b t0, a1, 0
18: st.b t0, a0, 0
addi.d a0, a0, 1
addi.d a1, a1, 1
addi.d a2, a2, -1
bgt a2, zero, 17b
/* return */
19: move a0, a2
jr ra
/* fixup and ex_table */
_asm_extable 1b, .L_fixup_handle_0
_asm_extable 2b, .L_fixup_handle_1
_asm_extable 3b, .L_fixup_handle_2
_asm_extable 4b, .L_fixup_handle_3
_asm_extable 5b, .L_fixup_handle_4
_asm_extable 6b, .L_fixup_handle_5
_asm_extable 7b, .L_fixup_handle_6
_asm_extable 8b, .L_fixup_handle_7
_asm_extable 9b, .L_fixup_handle_0
_asm_extable 10b, .L_fixup_handle_1
_asm_extable 11b, .L_fixup_handle_2
_asm_extable 12b, .L_fixup_handle_3
_asm_extable 13b, .L_fixup_handle_4
_asm_extable 14b, .L_fixup_handle_5
_asm_extable 15b, .L_fixup_handle_6
_asm_extable 16b, .L_fixup_handle_7
_asm_extable 17b, .L_fixup_handle_0
_asm_extable 18b, .L_fixup_handle_0
SYM_FUNC_END(__copy_user_fast)
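Like __clear_user, __copy_user returns the number of bytes it could not transfer, and the generic uaccess wrappers turn any nonzero remainder into -EFAULT. A sketch of the usual calling convention (the generic kernel pattern, not code from this patch):

static int copy_example(void *dst, const void __user *src, unsigned long n)
{
	if (copy_from_user(dst, src, n))	/* returns bytes NOT copied */
		return -EFAULT;
	return 0;
}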
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#include <asm/alternative-asm.h>
#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/cpu.h>
#include <asm/export.h>
#include <asm/regdef.h>
SYM_FUNC_START(memcpy)
/*
* Some CPUs support hardware unaligned access
*/
ALTERNATIVE "b __memcpy_generic", \
"b __memcpy_fast", CPU_FEATURE_UAL
SYM_FUNC_END(memcpy)
EXPORT_SYMBOL(memcpy)
/*
* void *__memcpy_generic(void *dst, const void *src, size_t n)
*
* a0: dst
* a1: src
* a2: n
*/
SYM_FUNC_START(__memcpy_generic)
move a3, a0
beqz a2, 2f
1: ld.b t0, a1, 0
st.b t0, a0, 0
addi.d a0, a0, 1
addi.d a1, a1, 1
addi.d a2, a2, -1
bgt a2, zero, 1b
2: move a0, a3
jr ra
SYM_FUNC_END(__memcpy_generic)
/*
* void *__memcpy_fast(void *dst, const void *src, size_t n)
*
* a0: dst
* a1: src
* a2: n
*/
SYM_FUNC_START(__memcpy_fast)
move a3, a0
beqz a2, 3f
ori a4, zero, 64
blt a2, a4, 2f
/* copy 64 bytes at a time */
1: ld.d t0, a1, 0
ld.d t1, a1, 8
ld.d t2, a1, 16
ld.d t3, a1, 24
ld.d t4, a1, 32
ld.d t5, a1, 40
ld.d t6, a1, 48
ld.d t7, a1, 56
st.d t0, a0, 0
st.d t1, a0, 8
st.d t2, a0, 16
st.d t3, a0, 24
st.d t4, a0, 32
st.d t5, a0, 40
st.d t6, a0, 48
st.d t7, a0, 56
addi.d a0, a0, 64
addi.d a1, a1, 64
addi.d a2, a2, -64
bge a2, a4, 1b
beqz a2, 3f
/* copy the remaining bytes */
2: ld.b t0, a1, 0
st.b t0, a0, 0
addi.d a0, a0, 1
addi.d a1, a1, 1
addi.d a2, a2, -1
bgt a2, zero, 2b
/* return */
3: move a0, a3
jr ra
SYM_FUNC_END(__memcpy_fast)
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#include <asm/alternative-asm.h>
#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/cpu.h>
#include <asm/export.h>
#include <asm/regdef.h>
SYM_FUNC_START(memmove)
blt a0, a1, 1f /* dst < src, memcpy */
blt a1, a0, 3f /* src < dst, rmemcpy */
jr ra /* dst == src, return */
/* if (src - dst) < 64, copy 1 byte at a time */
1: ori a3, zero, 64
sub.d t0, a1, a0
blt t0, a3, 2f
b memcpy
2: b __memcpy_generic
/* if (dst - src) < 64, copy 1 byte at a time */
3: ori a3, zero, 64
sub.d t0, a0, a1
blt t0, a3, 4f
b rmemcpy
4: b __rmemcpy_generic
SYM_FUNC_END(memmove)
EXPORT_SYMBOL(memmove)
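memmove picks the copy direction from the pointer order: forward (memcpy) when dst < src, backward (rmemcpy, defined below) when dst > src. The extra distance check exists because the fast paths read a whole 64-byte block before writing it back, so regions that overlap within 64 bytes must take the byte-at-a-time generic routines. A simplified C model of the direction choice (a sketch that folds both generic paths into byte loops, not the kernel code):

void *memmove_model(void *dst, const void *src, size_t n)
{
	unsigned char *d = dst;
	const unsigned char *s = src;

	if (d < s) {
		while (n--)
			*d++ = *s++;	/* forward, like memcpy */
	} else if (d > s) {
		d += n; s += n;
		while (n--)
			*--d = *--s;	/* backward, like rmemcpy */
	}
	return dst;
}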
SYM_FUNC_START(rmemcpy)
/*
* Some CPUs support hardware unaligned access
*/
ALTERNATIVE "b __rmemcpy_generic", \
"b __rmemcpy_fast", CPU_FEATURE_UAL
SYM_FUNC_END(rmemcpy)
/*
* void *__rmemcpy_generic(void *dst, const void *src, size_t n)
*
* a0: dst
* a1: src
* a2: n
*/
SYM_FUNC_START(__rmemcpy_generic)
move a3, a0
beqz a2, 2f
add.d a0, a0, a2
add.d a1, a1, a2
1: ld.b t0, a1, -1
st.b t0, a0, -1
addi.d a0, a0, -1
addi.d a1, a1, -1
addi.d a2, a2, -1
bgt a2, zero, 1b
2: move a0, a3
jr ra
SYM_FUNC_END(__rmemcpy_generic)
/*
* void *__rmemcpy_fast(void *dst, const void *src, size_t n)
*
* a0: dst
* a1: src
* a2: n
*/
SYM_FUNC_START(__rmemcpy_fast)
move a3, a0
beqz a2, 3f
add.d a0, a0, a2
add.d a1, a1, a2
ori a4, zero, 64
blt a2, a4, 2f
/* copy 64 bytes at a time */
1: ld.d t0, a1, -8
ld.d t1, a1, -16
ld.d t2, a1, -24
ld.d t3, a1, -32
ld.d t4, a1, -40
ld.d t5, a1, -48
ld.d t6, a1, -56
ld.d t7, a1, -64
st.d t0, a0, -8
st.d t1, a0, -16
st.d t2, a0, -24
st.d t3, a0, -32
st.d t4, a0, -40
st.d t5, a0, -48
st.d t6, a0, -56
st.d t7, a0, -64
addi.d a0, a0, -64
addi.d a1, a1, -64
addi.d a2, a2, -64
bge a2, a4, 1b
beqz a2, 3f
/* copy the remaining bytes */
2: ld.b t0, a1, -1
st.b t0, a0, -1
addi.d a0, a0, -1
addi.d a1, a1, -1
addi.d a2, a2, -1
bgt a2, zero, 2b
/* return */
3: move a0, a3
jr ra
SYM_FUNC_END(__rmemcpy_fast)
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#include <asm/alternative-asm.h>
#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/cpu.h>
#include <asm/export.h>
#include <asm/regdef.h>
.macro fill_to_64 r0
bstrins.d \r0, \r0, 15, 8
bstrins.d \r0, \r0, 31, 16
bstrins.d \r0, \r0, 63, 32
.endm
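fill_to_64 replicates the low byte of the fill value across all 64 bits with three bstrins.d inserts, so __memset_fast (below) can store eight bytes per st.d. The C equivalent, assuming only the low byte of c is meaningful (a model, not kernel code):

static unsigned long fill_to_64(unsigned long c)
{
	c &= 0xff;
	c |= c << 8;	/* bstrins.d r, r, 15, 8  */
	c |= c << 16;	/* bstrins.d r, r, 31, 16 */
	c |= c << 32;	/* bstrins.d r, r, 63, 32 */
	return c;
}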
SYM_FUNC_START(memset)
/*
* Some CPUs support hardware unaligned access
*/
ALTERNATIVE "b __memset_generic", \
"b __memset_fast", CPU_FEATURE_UAL
SYM_FUNC_END(memset)
EXPORT_SYMBOL(memset)
/*
* void *__memset_generic(void *s, int c, size_t n)
*
* a0: s
* a1: c
* a2: n
*/
SYM_FUNC_START(__memset_generic)
move a3, a0
beqz a2, 2f
1: st.b a1, a0, 0
addi.d a0, a0, 1
addi.d a2, a2, -1
bgt a2, zero, 1b
2: move a0, a3
jr ra
SYM_FUNC_END(__memset_generic)
/*
* void *__memset_fast(void *s, int c, size_t n)
*
* a0: s
* a1: c
* a2: n
*/
SYM_FUNC_START(__memset_fast)
move a3, a0
beqz a2, 3f
ori a4, zero, 64
blt a2, a4, 2f
/* fill a1 to 64 bits */
fill_to_64 a1
/* set 64 bytes at a time */
1: st.d a1, a0, 0
st.d a1, a0, 8
st.d a1, a0, 16
st.d a1, a0, 24
st.d a1, a0, 32
st.d a1, a0, 40
st.d a1, a0, 48
st.d a1, a0, 56
addi.d a0, a0, 64
addi.d a2, a2, -64
bge a2, a4, 1b
beqz a2, 3f
/* set the remaining bytes */
2: st.b a1, a0, 0
addi.d a0, a0, 1
addi.d a2, a2, -1
bgt a2, zero, 2b
/* return */
3: move a0, a3
jr ra
SYM_FUNC_END(__memset_fast)
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#include <linux/linkage.h>
#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/asm-extable.h>
#include <asm/errno.h>
#include <asm/export.h>
#include <asm/regdef.h>
.L_fixup_handle_unaligned:
li.w a0, -EFAULT
jr ra
/*
* unsigned long unaligned_read(void *addr, void *value, unsigned long n, bool sign)
*
* a0: addr
* a1: value
* a2: n
* a3: sign
*/
SYM_FUNC_START(unaligned_read)
beqz a2, 5f
li.w t2, 0
addi.d t0, a2, -1
slli.d t1, t0, 3
add.d a0, a0, t0
beqz a3, 2f
1: ld.b t3, a0, 0
b 3f
2: ld.bu t3, a0, 0
3: sll.d t3, t3, t1
or t2, t2, t3
addi.d t1, t1, -8
addi.d a0, a0, -1
addi.d a2, a2, -1
bgtz a2, 2b
4: st.d t2, a1, 0
move a0, a2
jr ra
5: li.w a0, -EFAULT
jr ra
_asm_extable 1b, .L_fixup_handle_unaligned
_asm_extable 2b, .L_fixup_handle_unaligned
_asm_extable 4b, .L_fixup_handle_unaligned
SYM_FUNC_END(unaligned_read)
/*
* unsigned long unaligned_write(void *addr, unsigned long value, unsigned long n)
*
* a0: addr
* a1: value
* a2: n
*/
SYM_FUNC_START(unaligned_write)
beqz a2, 3f
li.w t0, 0
1: srl.d t1, a1, t0
2: st.b t1, a0, 0
addi.d t0, t0, 8
addi.d a2, a2, -1
addi.d a0, a0, 1
bgtz a2, 1b
move a0, a2
jr ra
3: li.w a0, -EFAULT
jr ra
_asm_extable 2b, .L_fixup_handle_unaligned
SYM_FUNC_END(unaligned_write)
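unaligned_read and unaligned_write are the building blocks for the unaligned-access emulation in the trap handler: on CPUs without hardware unaligned access, a misaligned ld.*/st.* traps, and the handler reassembles the value one byte at a time through these extable-protected accessors. A C model of unaligned_read (a sketch; the real work must stay in asm so each byte load gets its own exception-table entry):

static long unaligned_read_model(const unsigned char *addr, int n, bool sign)
{
	unsigned long v = 0;
	int i;

	for (i = n - 1; i >= 0; i--)
		v = (v << 8) | addr[i];		/* highest byte first, like the asm */

	if (sign && n < 8) {
		int shift = 64 - 8 * n;
		return (long)(v << shift) >> shift;	/* sign-extend to 64 bits */
	}
	return v;
}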
@@ -2,21 +2,62 @@
 /*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
+#include <linux/bitfield.h>
 #include <linux/extable.h>
-#include <linux/spinlock.h>
-#include <asm/branch.h>
 #include <linux/uaccess.h>
+#include <asm/asm-extable.h>
+#include <asm/branch.h>

+static inline unsigned long
+get_ex_fixup(const struct exception_table_entry *ex)
+{
+	return ((unsigned long)&ex->fixup + ex->fixup);
+}
+
+static inline void regs_set_gpr(struct pt_regs *regs,
+				unsigned int offset, unsigned long val)
+{
+	if (offset && offset <= MAX_REG_OFFSET)
+		*(unsigned long *)((unsigned long)regs + offset) = val;
+}
+
+static bool ex_handler_fixup(const struct exception_table_entry *ex,
+			     struct pt_regs *regs)
+{
+	regs->csr_era = get_ex_fixup(ex);
+
+	return true;
+}
+
+static bool ex_handler_uaccess_err_zero(const struct exception_table_entry *ex,
+					struct pt_regs *regs)
+{
+	int reg_err = FIELD_GET(EX_DATA_REG_ERR, ex->data);
+	int reg_zero = FIELD_GET(EX_DATA_REG_ZERO, ex->data);
+
+	regs_set_gpr(regs, reg_err * sizeof(unsigned long), -EFAULT);
+	regs_set_gpr(regs, reg_zero * sizeof(unsigned long), 0);
+
+	regs->csr_era = get_ex_fixup(ex);
+
+	return true;
+}
+
-int fixup_exception(struct pt_regs *regs)
+bool fixup_exception(struct pt_regs *regs)
 {
-	const struct exception_table_entry *fixup;
+	const struct exception_table_entry *ex;

-	fixup = search_exception_tables(exception_era(regs));
-	if (fixup) {
-		regs->csr_era = fixup->fixup;
+	ex = search_exception_tables(exception_era(regs));
+	if (!ex)
+		return false;

-		return 1;
+	switch (ex->type) {
+	case EX_TYPE_FIXUP:
+		return ex_handler_fixup(ex, regs);
+	case EX_TYPE_UACCESS_ERR_ZERO:
+		return ex_handler_uaccess_err_zero(ex, regs);
+	case EX_TYPE_BPF:
+		return ex_handler_bpf(ex, regs);
	}

-	return 0;
+	BUG();
 }
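This is the switch to relative exception tables: entries no longer hold absolute fixup addresses but self-relative 32-bit offsets plus a type and payload, which halves the table size and removes load-time relocations for modules. An assumed layout, modelled on the arm64-style relative extable (the exact field widths are an assumption, not quoted from this patch):

struct exception_table_entry_sketch {
	int insn;		/* faulting insn address, relative to &insn */
	int fixup;		/* fixup target, relative to &fixup         */
	unsigned int type;	/* EX_TYPE_FIXUP / UACCESS_ERR_ZERO / BPF   */
	unsigned int data;	/* e.g. EX_DATA_REG_ERR | EX_DATA_REG_ZERO  */
};

Resolution mirrors get_ex_fixup() above: an entry at address E with fixup offset F jumps to E + offsetof(entry, fixup) + F.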
@@ -387,6 +387,65 @@ static bool is_signed_bpf_cond(u8 cond)
	       cond == BPF_JSGE || cond == BPF_JSLE;
 }
#define BPF_FIXUP_REG_MASK GENMASK(31, 27)
#define BPF_FIXUP_OFFSET_MASK GENMASK(26, 0)
bool ex_handler_bpf(const struct exception_table_entry *ex,
struct pt_regs *regs)
{
int dst_reg = FIELD_GET(BPF_FIXUP_REG_MASK, ex->fixup);
off_t offset = FIELD_GET(BPF_FIXUP_OFFSET_MASK, ex->fixup);
regs->regs[dst_reg] = 0;
regs->csr_era = (unsigned long)&ex->fixup - offset;
return true;
}
/* For accesses to BTF pointers, add an entry to the exception table */
static int add_exception_handler(const struct bpf_insn *insn,
struct jit_ctx *ctx,
int dst_reg)
{
unsigned long pc;
off_t offset;
struct exception_table_entry *ex;
if (!ctx->image || !ctx->prog->aux->extable || BPF_MODE(insn->code) != BPF_PROBE_MEM)
return 0;
if (WARN_ON_ONCE(ctx->num_exentries >= ctx->prog->aux->num_exentries))
return -EINVAL;
ex = &ctx->prog->aux->extable[ctx->num_exentries];
pc = (unsigned long)&ctx->image[ctx->idx - 1];
offset = pc - (long)&ex->insn;
if (WARN_ON_ONCE(offset >= 0 || offset < INT_MIN))
return -ERANGE;
ex->insn = offset;
/*
* Since the extable follows the program, the fixup offset is always
* negative and limited to BPF_JIT_REGION_SIZE. Store a positive value
* to keep things simple, and put the destination register in the upper
* bits. We don't need to worry about buildtime or runtime sort
* modifying the upper bits because the table is already sorted, and
* isn't part of the main exception table.
*/
offset = (long)&ex->fixup - (pc + LOONGARCH_INSN_SIZE);
if (!FIELD_FIT(BPF_FIXUP_OFFSET_MASK, offset))
return -ERANGE;
ex->type = EX_TYPE_BPF;
ex->fixup = FIELD_PREP(BPF_FIXUP_OFFSET_MASK, offset) | FIELD_PREP(BPF_FIXUP_REG_MASK, dst_reg);
ctx->num_exentries++;
return 0;
}
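The packed ex->fixup word holds a 5-bit destination register and a 27-bit positive byte offset from the instruction after the faulting load to the entry's own fixup field; ex_handler_bpf() subtracts that offset back out, so the program resumes exactly one instruction past the fault with dst_reg zeroed. A self-contained model of the encoding using plain shifts instead of FIELD_PREP/FIELD_GET (a sketch, not kernel code):

#include <stdint.h>
#include <assert.h>

#define REG_SHIFT 27				/* GENMASK(31, 27) */
#define OFF_MASK  ((1u << REG_SHIFT) - 1)	/* GENMASK(26, 0)  */

static uint32_t pack_fixup(uint32_t off, uint32_t reg)
{
	return (reg << REG_SHIFT) | (off & OFF_MASK);
}

int main(void)
{
	uint32_t fixup = pack_fixup(0x123456, 9);

	assert((fixup >> REG_SHIFT) == 9);	/* dst_reg round-trips     */
	assert((fixup & OFF_MASK) == 0x123456);	/* byte offset round-trips */
	return 0;
}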
 static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool extra_pass)
 {
	u8 tm = -1;
@@ -816,6 +875,10 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
	case BPF_LDX | BPF_MEM | BPF_H:
	case BPF_LDX | BPF_MEM | BPF_W:
	case BPF_LDX | BPF_MEM | BPF_DW:
+	case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
+	case BPF_LDX | BPF_PROBE_MEM | BPF_W:
+	case BPF_LDX | BPF_PROBE_MEM | BPF_H:
+	case BPF_LDX | BPF_PROBE_MEM | BPF_B:
		switch (BPF_SIZE(code)) {
		case BPF_B:
			if (is_signed_imm12(off)) {
@@ -854,6 +917,10 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
			}
			break;
		}
+
+		ret = add_exception_handler(insn, ctx, dst);
+		if (ret)
+			return ret;
		break;

	/* *(size *)(dst + off) = imm */
@@ -1018,6 +1085,9 @@ static int validate_code(struct jit_ctx *ctx)
			return -1;
	}

+	if (WARN_ON_ONCE(ctx->num_exentries != ctx->prog->aux->num_exentries))
+		return -1;
+
	return 0;
 }
@@ -1025,7 +1095,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 {
	bool tmp_blinded = false, extra_pass = false;
	u8 *image_ptr;
-	int image_size;
+	int image_size, prog_size, extable_size;
	struct jit_ctx ctx;
	struct jit_data *jit_data;
	struct bpf_binary_header *header;
@@ -1066,7 +1136,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
		image_ptr = jit_data->image;
		header = jit_data->header;
		extra_pass = true;
-		image_size = sizeof(u32) * ctx.idx;
+		prog_size = sizeof(u32) * ctx.idx;
		goto skip_init_ctx;
	}
@@ -1088,12 +1158,15 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
	ctx.epilogue_offset = ctx.idx;
	build_epilogue(&ctx);

+	extable_size = prog->aux->num_exentries * sizeof(struct exception_table_entry);
+
	/* Now we know the actual image size.
	 * As each LoongArch instruction is of length 32bit,
	 * we are translating number of JITed intructions into
	 * the size required to store these JITed code.
	 */
-	image_size = sizeof(u32) * ctx.idx;
+	prog_size = sizeof(u32) * ctx.idx;
+	image_size = prog_size + extable_size;
	/* Now we know the size of the structure to make */
	header = bpf_jit_binary_alloc(image_size, &image_ptr,
				      sizeof(u32), jit_fill_hole);
@@ -1104,9 +1177,12 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)

	/* 2. Now, the actual pass to generate final JIT code */
	ctx.image = (union loongarch_instruction *)image_ptr;
+	if (extable_size)
+		prog->aux->extable = (void *)image_ptr + prog_size;

 skip_init_ctx:
	ctx.idx = 0;
+	ctx.num_exentries = 0;

	build_prologue(&ctx);
	if (build_body(&ctx, extra_pass)) {
@@ -1125,7 +1201,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)

	/* And we're done */
	if (bpf_jit_enable > 1)
-		bpf_jit_dump(prog->len, image_size, 2, ctx.image);
+		bpf_jit_dump(prog->len, prog_size, 2, ctx.image);

	/* Update the icache */
	flush_icache_range((unsigned long)header, (unsigned long)(ctx.image + ctx.idx));
@@ -1147,7 +1223,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
		jit_data->header = header;
	}
	prog->jited = 1;
-	prog->jited_len = image_size;
+	prog->jited_len = prog_size;
	prog->bpf_func = (void *)ctx.image;

	if (!prog->is_func || extra_pass) {
@@ -4,6 +4,7 @@
 *
 * Copyright (C) 2022 Loongson Technology Corporation Limited
 */
+#include <linux/bitfield.h>
 #include <linux/bpf.h>
 #include <linux/filter.h>
 #include <asm/cacheflush.h>
@@ -15,6 +16,7 @@ struct jit_ctx {
	unsigned int flags;
	unsigned int epilogue_offset;
	u32 *offset;
+	int num_exentries;
	union loongarch_instruction *image;
	u32 stack_size;
 };
@@ -26,9 +26,12 @@ void pcibios_add_bus(struct pci_bus *bus)

 int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
 {
-	struct pci_config_window *cfg = bridge->bus->sysdata;
-	struct acpi_device *adev = to_acpi_device(cfg->parent);
+	struct acpi_device *adev = NULL;
	struct device *bus_dev = &bridge->bus->dev;
+	struct pci_config_window *cfg = bridge->bus->sysdata;
+
+	if (!acpi_disabled)
+		adev = to_acpi_device(cfg->parent);

	ACPI_COMPANION_SET(&bridge->dev, adev);
	set_dev_node(bus_dev, pa_to_nid(cfg->res.start));
obj-y += platform.o
obj-$(CONFIG_SUSPEND) += suspend.o suspend_asm.o
obj-$(CONFIG_HIBERNATION) += hibernate.o hibernate_asm.o
// SPDX-License-Identifier: GPL-2.0
#include <asm/fpu.h>
#include <asm/loongson.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <linux/suspend.h>
static u32 saved_crmd;
static u32 saved_prmd;
static u32 saved_euen;
static u32 saved_ecfg;
static u64 saved_pcpu_base;
struct pt_regs saved_regs;
void save_processor_state(void)
{
saved_crmd = csr_read32(LOONGARCH_CSR_CRMD);
saved_prmd = csr_read32(LOONGARCH_CSR_PRMD);
saved_euen = csr_read32(LOONGARCH_CSR_EUEN);
saved_ecfg = csr_read32(LOONGARCH_CSR_ECFG);
saved_pcpu_base = csr_read64(PERCPU_BASE_KS);
if (is_fpu_owner())
save_fp(current);
}
void restore_processor_state(void)
{
csr_write32(saved_crmd, LOONGARCH_CSR_CRMD);
csr_write32(saved_prmd, LOONGARCH_CSR_PRMD);
csr_write32(saved_euen, LOONGARCH_CSR_EUEN);
csr_write32(saved_ecfg, LOONGARCH_CSR_ECFG);
csr_write64(saved_pcpu_base, PERCPU_BASE_KS);
if (is_fpu_owner())
restore_fp(current);
}
int pfn_is_nosave(unsigned long pfn)
{
unsigned long nosave_begin_pfn = PFN_DOWN(__pa(&__nosave_begin));
unsigned long nosave_end_pfn = PFN_UP(__pa(&__nosave_end));
return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn);
}
extern int swsusp_asm_suspend(void);
int swsusp_arch_suspend(void)
{
enable_pci_wakeup();
return swsusp_asm_suspend();
}
extern int swsusp_asm_resume(void);
int swsusp_arch_resume(void)
{
/* Avoid TLB mismatch during and after kernel resume */
local_flush_tlb_all();
return swsusp_asm_resume();
}
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Hibernation support specific for LoongArch
*
* Author: Huacai Chen <chenhuacai@loongson.cn>
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#include <linux/linkage.h>
#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/regdef.h>
.text
SYM_FUNC_START(swsusp_asm_suspend)
la.pcrel t0, saved_regs
PTR_S ra, t0, PT_R1
PTR_S tp, t0, PT_R2
PTR_S sp, t0, PT_R3
PTR_S u0, t0, PT_R21
PTR_S fp, t0, PT_R22
PTR_S s0, t0, PT_R23
PTR_S s1, t0, PT_R24
PTR_S s2, t0, PT_R25
PTR_S s3, t0, PT_R26
PTR_S s4, t0, PT_R27
PTR_S s5, t0, PT_R28
PTR_S s6, t0, PT_R29
PTR_S s7, t0, PT_R30
PTR_S s8, t0, PT_R31
b swsusp_save
SYM_FUNC_END(swsusp_asm_suspend)
SYM_FUNC_START(swsusp_asm_resume)
la.pcrel t0, restore_pblist
PTR_L t0, t0, 0
0:
PTR_L t1, t0, PBE_ADDRESS /* source */
PTR_L t2, t0, PBE_ORIG_ADDRESS /* destination */
PTR_LI t3, _PAGE_SIZE
PTR_ADD t3, t3, t1
1:
REG_L t8, t1, 0
REG_S t8, t2, 0
PTR_ADDI t1, t1, SZREG
PTR_ADDI t2, t2, SZREG
bne t1, t3, 1b
PTR_L t0, t0, PBE_NEXT
bnez t0, 0b
la.pcrel t0, saved_regs
PTR_L ra, t0, PT_R1
PTR_L tp, t0, PT_R2
PTR_L sp, t0, PT_R3
PTR_L u0, t0, PT_R21
PTR_L fp, t0, PT_R22
PTR_L s0, t0, PT_R23
PTR_L s1, t0, PT_R24
PTR_L s2, t0, PT_R25
PTR_L s3, t0, PT_R26
PTR_L s4, t0, PT_R27
PTR_L s5, t0, PT_R28
PTR_L s6, t0, PT_R29
PTR_L s7, t0, PT_R30
PTR_L s8, t0, PT_R31
PTR_LI a0, 0x0
jirl zero, ra, 0
SYM_FUNC_END(swsusp_asm_resume)
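swsusp_asm_resume walks restore_pblist, copying every saved page back over its original address before reloading the callee-saved registers stashed by swsusp_asm_suspend. The loop has to live in assembly because it may overwrite the very kernel text and stacks that are executing the resume; its C equivalent, using the generic swsusp page descriptor (address = the copy, orig_address = the destination, next = list link), is just:

static void restore_image_model(struct pbe *restore_pblist)
{
	struct pbe *p;

	/* a sketch of the asm loop above, not code that could actually
	 * run at this point of resume */
	for (p = restore_pblist; p; p = p->next)
		memcpy(p->orig_address, p->address, PAGE_SIZE);
}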
// SPDX-License-Identifier: GPL-2.0
/*
* Author: Huacai Chen <chenhuacai@loongson.cn>
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#include <linux/acpi.h>
#include <linux/platform_device.h>
#include <asm/bootinfo.h>
#include <asm/loongson.h>
void enable_gpe_wakeup(void)
{
if (acpi_disabled)
return;
if (acpi_gbl_reduced_hardware)
return;
acpi_enable_all_wakeup_gpes();
}
void enable_pci_wakeup(void)
{
if (acpi_disabled)
return;
if (acpi_gbl_reduced_hardware)
return;
acpi_write_bit_register(ACPI_BITREG_PCIEXP_WAKE_STATUS, 1);
if (acpi_gbl_FADT.flags & ACPI_FADT_PCI_EXPRESS_WAKE)
acpi_write_bit_register(ACPI_BITREG_PCIEXP_WAKE_DISABLE, 0);
}
static int __init loongson3_acpi_suspend_init(void)
{
#ifdef CONFIG_ACPI
acpi_status status;
uint64_t suspend_addr = 0;
if (acpi_disabled || acpi_gbl_reduced_hardware)
return 0;
acpi_write_bit_register(ACPI_BITREG_SCI_ENABLE, 1);
status = acpi_evaluate_integer(NULL, "\\SADR", NULL, &suspend_addr);
if (ACPI_FAILURE(status) || !suspend_addr) {
pr_err("ACPI S3 is not support!\n");
return -1;
}
loongson_sysconf.suspend_addr = (u64)phys_to_virt(PHYSADDR(suspend_addr));
#endif
return 0;
}
device_initcall(loongson3_acpi_suspend_init);
// SPDX-License-Identifier: GPL-2.0
/*
* loongson-specific suspend support
*
* Author: Huacai Chen <chenhuacai@loongson.cn>
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#include <linux/acpi.h>
#include <linux/pm.h>
#include <linux/suspend.h>
#include <asm/loongarch.h>
#include <asm/loongson.h>
#include <asm/setup.h>
#include <asm/time.h>
#include <asm/tlbflush.h>
u64 loongarch_suspend_addr;
struct saved_registers {
u32 ecfg;
u32 euen;
u64 pgd;
u64 kpgd;
u32 pwctl0;
u32 pwctl1;
};
static struct saved_registers saved_regs;
static void arch_common_suspend(void)
{
save_counter();
saved_regs.pgd = csr_read64(LOONGARCH_CSR_PGDL);
saved_regs.kpgd = csr_read64(LOONGARCH_CSR_PGDH);
saved_regs.pwctl0 = csr_read32(LOONGARCH_CSR_PWCTL0);
saved_regs.pwctl1 = csr_read32(LOONGARCH_CSR_PWCTL1);
saved_regs.ecfg = csr_read32(LOONGARCH_CSR_ECFG);
saved_regs.euen = csr_read32(LOONGARCH_CSR_EUEN);
loongarch_suspend_addr = loongson_sysconf.suspend_addr;
}
static void arch_common_resume(void)
{
sync_counter();
local_flush_tlb_all();
csr_write64(per_cpu_offset(0), PERCPU_BASE_KS);
csr_write64(eentry, LOONGARCH_CSR_EENTRY);
csr_write64(eentry, LOONGARCH_CSR_MERRENTRY);
csr_write64(tlbrentry, LOONGARCH_CSR_TLBRENTRY);
csr_write64(saved_regs.pgd, LOONGARCH_CSR_PGDL);
csr_write64(saved_regs.kpgd, LOONGARCH_CSR_PGDH);
csr_write32(saved_regs.pwctl0, LOONGARCH_CSR_PWCTL0);
csr_write32(saved_regs.pwctl1, LOONGARCH_CSR_PWCTL1);
csr_write32(saved_regs.ecfg, LOONGARCH_CSR_ECFG);
csr_write32(saved_regs.euen, LOONGARCH_CSR_EUEN);
}
int loongarch_acpi_suspend(void)
{
enable_gpe_wakeup();
enable_pci_wakeup();
arch_common_suspend();
/* processor specific suspend */
loongarch_suspend_enter();
arch_common_resume();
return 0;
}
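loongarch_acpi_suspend() is the arch entry point for ACPI S3: wake sources are armed, the CSR state that firmware will not preserve is saved, the firmware suspend routine is entered via loongarch_suspend_enter(), and everything is restored on wakeup. For illustration only, this is how such an entry point would be wired into the generic suspend framework; the real hookup for ACPI S3 goes through the ACPI sleep code (drivers/acpi/sleep.c), not through a hand-rolled platform_suspend_ops like this sketch:

#include <linux/suspend.h>

static int loongarch_suspend_enter_state(suspend_state_t state)
{
	return state == PM_SUSPEND_MEM ? loongarch_acpi_suspend() : -EINVAL;
}

static const struct platform_suspend_ops loongarch_suspend_ops_sketch = {
	.valid = suspend_valid_only_mem,
	.enter = loongarch_suspend_enter_state,
};

static int __init loongarch_pm_init_sketch(void)
{
	suspend_set_ops(&loongarch_suspend_ops_sketch);
	return 0;
}
device_initcall(loongarch_pm_init_sketch);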
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Sleep helper for Loongson-3 sleep mode.
*
* Author: Huacai Chen <chenhuacai@loongson.cn>
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/addrspace.h>
#include <asm/loongarch.h>
#include <asm/stackframe.h>
/* preparatory stuff */
.macro SETUP_SLEEP
addi.d sp, sp, -PT_SIZE
st.d $r1, sp, PT_R1
st.d $r2, sp, PT_R2
st.d $r3, sp, PT_R3
st.d $r4, sp, PT_R4
st.d $r21, sp, PT_R21
st.d $r22, sp, PT_R22
st.d $r23, sp, PT_R23
st.d $r24, sp, PT_R24
st.d $r25, sp, PT_R25
st.d $r26, sp, PT_R26
st.d $r27, sp, PT_R27
st.d $r28, sp, PT_R28
st.d $r29, sp, PT_R29
st.d $r30, sp, PT_R30
st.d $r31, sp, PT_R31
la.pcrel t0, acpi_saved_sp
st.d sp, t0, 0
.endm
.macro SETUP_WAKEUP
ld.d $r1, sp, PT_R1
ld.d $r2, sp, PT_R2
ld.d $r3, sp, PT_R3
ld.d $r4, sp, PT_R4
ld.d $r21, sp, PT_R21
ld.d $r22, sp, PT_R22
ld.d $r23, sp, PT_R23
ld.d $r24, sp, PT_R24
ld.d $r25, sp, PT_R25
ld.d $r26, sp, PT_R26
ld.d $r27, sp, PT_R27
ld.d $r28, sp, PT_R28
ld.d $r29, sp, PT_R29
ld.d $r30, sp, PT_R30
ld.d $r31, sp, PT_R31
.endm
.text
.align 12
/* Sleep/wakeup code for Loongson-3 */
SYM_FUNC_START(loongarch_suspend_enter)
SETUP_SLEEP
bl __flush_cache_all
/* Pass RA and SP to BIOS */
addi.d a1, sp, 0
la.pcrel a0, loongarch_wakeup_start
la.pcrel t0, loongarch_suspend_addr
ld.d t0, t0, 0
jirl a0, t0, 0 /* Call BIOS's STR sleep routine */
/*
* This is where we return upon wakeup.
* Reload all of the registers and return.
*/
SYM_INNER_LABEL(loongarch_wakeup_start, SYM_L_GLOBAL)
li.d t0, CSR_DMW0_INIT # UC, PLV0
csrwr t0, LOONGARCH_CSR_DMWIN0
li.d t0, CSR_DMW1_INIT # CA, PLV0
csrwr t0, LOONGARCH_CSR_DMWIN1
la.abs t0, 0f
jr t0
0:
la.pcrel t0, acpi_saved_sp
ld.d sp, t0, 0
SETUP_WAKEUP
addi.d sp, sp, PT_SIZE
jr ra
SYM_FUNC_END(loongarch_suspend_enter)
@@ -1523,6 +1523,14 @@ static int addend_mips_rel(struct elf_info *elf, Elf_Shdr *sechdr, Elf_Rela *r)
 #define R_RISCV_SUB32 39
 #endif

+#ifndef EM_LOONGARCH
+#define EM_LOONGARCH	258
+#endif
+
+#ifndef R_LARCH_SUB32
+#define R_LARCH_SUB32	55
+#endif
+
 static void section_rela(const char *modname, struct elf_info *elf,
			  Elf_Shdr *sechdr)
 {
@@ -1564,6 +1572,11 @@ static void section_rela(const char *modname, struct elf_info *elf,
			    ELF_R_TYPE(r.r_info) == R_RISCV_SUB32)
				continue;
			break;
+		case EM_LOONGARCH:
+			if (!strcmp("__ex_table", fromsec) &&
+			    ELF_R_TYPE(r.r_info) == R_LARCH_SUB32)
+				continue;
+			break;
		}
		sym = elf->symtab_start + r_sym;
		/* Skip special sections */
@@ -38,6 +38,14 @@
 #define R_AARCH64_ABS64	257
 #endif

+#ifndef EM_LOONGARCH
+#define EM_LOONGARCH		258
+#define R_LARCH_32		1
+#define R_LARCH_64		2
+#define R_LARCH_MARK_LA		20
+#define R_LARCH_SOP_PUSH_PLT_PCREL	29
+#endif
+
 #define R_ARM_PC24		1
 #define R_ARM_THM_CALL		10
 #define R_ARM_CALL		28
@@ -441,6 +449,28 @@ static int arm64_is_fake_mcount(Elf64_Rel const *rp)
	return ELF64_R_TYPE(w8(rp->r_info)) != R_AARCH64_CALL26;
 }

+static int LARCH32_is_fake_mcount(Elf32_Rel const *rp)
+{
+	switch (ELF64_R_TYPE(w(rp->r_info))) {
+	case R_LARCH_MARK_LA:
+	case R_LARCH_SOP_PUSH_PLT_PCREL:
+		return 0;
+	}
+
+	return 1;
+}
+
+static int LARCH64_is_fake_mcount(Elf64_Rel const *rp)
+{
+	switch (ELF64_R_TYPE(w(rp->r_info))) {
+	case R_LARCH_MARK_LA:
+	case R_LARCH_SOP_PUSH_PLT_PCREL:
+		return 0;
+	}
+
+	return 1;
+}
+
 /* 64-bit EM_MIPS has weird ELF64_Rela.r_info.
 * http://techpubs.sgi.com/library/manuals/4000/007-4658-001/pdf/007-4658-001.pdf
 * We interpret Table 29 Relocation Operation (Elf64_Rel, Elf64_Rela) [p.40]
@@ -558,6 +588,7 @@ static int do_file(char const *const fname)
		break;
	case EM_IA_64:	reltype = R_IA64_IMM64; break;
	case EM_MIPS:	/* reltype: e_class */ break;
+	case EM_LOONGARCH:	/* reltype: e_class */ break;
	case EM_PPC:	reltype = R_PPC_ADDR32; break;
	case EM_PPC64:	reltype = R_PPC64_ADDR64; break;
	case EM_S390:	/* reltype: e_class */ break;
@@ -589,6 +620,10 @@ static int do_file(char const *const fname)
			reltype = R_MIPS_32;
			is_fake_mcount32 = MIPS32_is_fake_mcount;
		}
+		if (w2(ehdr->e_machine) == EM_LOONGARCH) {
+			reltype = R_LARCH_32;
+			is_fake_mcount32 = LARCH32_is_fake_mcount;
+		}
		if (do32(ehdr, fname, reltype) < 0)
			goto out;
		break;
@@ -610,6 +645,10 @@ static int do_file(char const *const fname)
			Elf64_r_info = MIPS64_r_info;
			is_fake_mcount64 = MIPS64_is_fake_mcount;
		}
+		if (w2(ghdr->e_machine) == EM_LOONGARCH) {
+			reltype = R_LARCH_64;
+			is_fake_mcount64 = LARCH64_is_fake_mcount;
+		}
		if (do64(ghdr, fname, reltype) < 0)
			goto out;
		break;
@@ -304,6 +304,7 @@ static int do_file(char const *const fname, void *addr)
	switch (r2(&ehdr->e_machine)) {
	case EM_386:
	case EM_AARCH64:
+	case EM_LOONGARCH:
	case EM_RISCV:
	case EM_S390:
	case EM_X86_64:
@@ -317,7 +318,6 @@ static int do_file(char const *const fname, void *addr)
	case EM_ARCOMPACT:
	case EM_ARCV2:
	case EM_ARM:
-	case EM_LOONGARCH:
	case EM_MICROBLAZE:
	case EM_MIPS:
	case EM_XTENSA: