Commit 999324f5 authored by Linus Torvalds

Merge tag 'loongarch-5.20' of git://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson

Pull LoongArch updates from Huacai Chen:

 - Optimise getcpu() with vDSO

 - PCI enablement on top of pci & irqchip changes

 - Stack unwinder and stack trace support

 - Some bug fixes and build error fixes

 - Update the default config file

* tag 'loongarch-5.20' of git://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson:
  docs/zh_CN/LoongArch: Add I14 description
  docs/LoongArch: Add I14 description
  LoongArch: Update Loongson-3 default config file
  LoongArch: Add USER_STACKTRACE support
  LoongArch: Add STACKTRACE support
  LoongArch: Add prologue unwinder support
  LoongArch: Add guess unwinder support
  LoongArch: Add vDSO syscall __vdso_getcpu()
  LoongArch: Add PCI controller support
  LoongArch: Parse MADT to get multi-processor information
  LoongArch: Jump to the link address before enable PG
  LoongArch: Requires __force attributes for any casts
  LoongArch: Fix unsigned comparison with less than zero
  LoongArch: Adjust arch/loongarch/Kconfig
  LoongArch: cpuinfo: Fix a warning for CONFIG_CPUMASK_OFFSTACK
parents f7cdaeea 71535592
@@ -221,7 +221,7 @@ I26 Opcode + I26L + I26H
=========== ==========================
Rd is the destination register operand, while Rj, Rk and Ra ("a" stands for
"additional") are the source register operands. I8/I12/I16/I21/I26 are
"additional") are the source register operands. I8/I12/I14/I16/I21/I26 are
immediate operands of respective width. The longer I21 and I26 are stored
in separate higher and lower parts in the instruction word, denoted by the "L"
and "H" suffixes.
......
@@ -190,8 +190,8 @@ I26 Opcode + I26L + I26H
=========== ==========================
Opcode is the instruction opcode; Rj and Rk are the source operands (registers), Rd is the destination operand (register), and Ra is
the additional operand (register) specific to the 4R-type format. I8/I12/I16/I21/I26 are 8-bit/12-bit/16-bit/
21-bit/26-bit immediates respectively. The longer 21-bit and 26-bit immediates are split into a high part and a low part in the instruction word,
the additional operand (register) specific to the 4R-type format. I8/I12/I14/I16/I21/I26 are 8-bit/12-bit/14-bit/
16-bit/21-bit/26-bit immediates respectively. The longer 21-bit and 26-bit immediates are split into a high part and a low part in the instruction word,
which is why you can see suffixed forms such as I21L/I21H and I26L/I26H in the format descriptions here.
Instruction list
......
@@ -2,7 +2,9 @@
config LOONGARCH
bool
default y
select ACPI
select ACPI_GENERIC_GSI if ACPI
select ACPI_MCFG if ACPI
select ACPI_SYSTEM_POWER_STATES_SUPPORT if ACPI
select ARCH_BINFMT_ELF_STATE
select ARCH_ENABLE_MEMORY_HOTPLUG
@@ -40,6 +42,7 @@ config LOONGARCH
select ARCH_MIGHT_HAVE_PC_PARPORT
select ARCH_MIGHT_HAVE_PC_SERIO
select ARCH_SPARSEMEM_ENABLE
select ARCH_STACKWALK
select ARCH_SUPPORTS_ACPI
select ARCH_SUPPORTS_ATOMIC_RMW
select ARCH_SUPPORTS_HUGETLBFS
@@ -51,6 +54,7 @@ config LOONGARCH
select ARCH_WANTS_NO_INSTR
select BUILDTIME_TABLE_SORT
select COMMON_CLK
select EFI
select GENERIC_CLOCKEVENTS
select GENERIC_CMOS_UPDATE
select GENERIC_CPU_AUTOPROBE
@@ -86,6 +90,7 @@ config LOONGARCH
select HAVE_IRQ_TIME_ACCOUNTING
select HAVE_MOD_ARCH_SPECIFIC
select HAVE_NMI
select HAVE_PCI
select HAVE_PERF_EVENTS
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_RSEQ
@@ -95,20 +100,27 @@ config LOONGARCH
select HAVE_VIRT_CPU_ACCOUNTING_GEN if !SMP
select IRQ_FORCED_THREADING
select IRQ_LOONGARCH_CPU
select MMU_GATHER_MERGE_VMAS if MMU
select MODULES_USE_ELF_RELA if MODULES
select NEED_PER_CPU_EMBED_FIRST_CHUNK
select NEED_PER_CPU_PAGE_FIRST_CHUNK
select OF
select OF_EARLY_FLATTREE
select PCI
select PCI_DOMAINS_GENERIC
select PCI_ECAM if ACPI
select PCI_LOONGSON
select PCI_MSI_ARCH_FALLBACKS
select PERF_USE_VMALLOC
select RTC_LIB
select SMP
select SPARSE_IRQ
select SYSCTL_EXCEPTION_TRACE
select SWIOTLB
select TRACE_IRQFLAGS_SUPPORT
select USE_PERCPU_NUMA_NODE_ID
select USER_STACKTRACE_SUPPORT
select ZONE_DMA32
select MMU_GATHER_MERGE_VMAS if MMU
config 32BIT
bool
@@ -141,6 +153,10 @@ config LOCKDEP_SUPPORT
bool
default y
config STACKTRACE_SUPPORT
bool
default y
# MACH_LOONGSON32 and MACH_LOONGSON64 are deliberately carried over from the
# MIPS Loongson code, to preserve Loongson-specific code paths in drivers that
# are shared between architectures, and specifically expecting the symbols.
......
choice
prompt "Choose kernel unwinder"
default UNWINDER_PROLOGUE if KALLSYMS
help
This determines which method will be used for unwinding kernel stack
traces for panics, oopses, bugs, warnings, perf, /proc/<pid>/stack,
lockdep, and more.
config UNWINDER_GUESS
bool "Guess unwinder"
help
This option enables the "guess" unwinder for unwinding kernel stack
traces. It scans the stack and reports every kernel text address it
finds. Some of the addresses it reports may be incorrect.
While this option often produces false positives, it can still be
useful in many cases.
config UNWINDER_PROLOGUE
bool "Prologue unwinder"
depends on KALLSYMS
help
This option enables the "prologue" unwinder for unwinding kernel stack
traces. It unwinds the stack frame by analysing the function prologue code.
Symbol information is needed, at least the address and length of each function.
Some of the addresses it reports may be incorrect (but better than the
Guess unwinder).
endchoice
@@ -47,6 +47,8 @@ cflags-y += $(call cc-option, -mno-check-zero-division)
load-y = 0x9000000000200000
bootvars-y = VMLINUX_LOAD_ADDRESS=$(load-y)
drivers-$(CONFIG_PCI) += arch/loongarch/pci/
KBUILD_AFLAGS += $(cflags-y)
KBUILD_CFLAGS += $(cflags-y)
KBUILD_CPPFLAGS += -DVMLINUX_LOAD_ADDRESS=$(load-y)
......
@@ -278,6 +278,8 @@ CONFIG_NET_ACT_IPT=m
CONFIG_NET_ACT_NAT=m
CONFIG_NET_ACT_BPF=m
CONFIG_OPENVSWITCH=m
CONFIG_VSOCKETS=m
CONFIG_VIRTIO_VSOCKETS=m
CONFIG_NETLINK_DIAG=y
CONFIG_CGROUP_NET_PRIO=y
CONFIG_BT=m
@@ -289,6 +291,7 @@ CONFIG_MAC80211=m
CONFIG_RFKILL=m
CONFIG_RFKILL_INPUT=y
CONFIG_NET_9P=y
CONFIG_NET_9P_VIRTIO=y
CONFIG_CEPH_LIB=m
CONFIG_PCIEPORTBUS=y
CONFIG_HOTPLUG_PCI_PCIE=y
@@ -308,6 +311,8 @@ CONFIG_RAPIDIO_MPORT_CDEV=m
CONFIG_UEVENT_HELPER=y
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
CONFIG_FW_LOADER_COMPRESS=y
CONFIG_FW_LOADER_COMPRESS_ZSTD=y
CONFIG_MTD=m
CONFIG_MTD_BLOCK=m
CONFIG_MTD_CFI=m
@@ -328,8 +333,19 @@ CONFIG_BLK_DEV_CRYPTOLOOP=y
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=8192
CONFIG_VIRTIO_BLK=y
CONFIG_BLK_DEV_RBD=m
CONFIG_BLK_DEV_NVME=y
CONFIG_NVME_MULTIPATH=y
CONFIG_NVME_RDMA=m
CONFIG_NVME_FC=m
CONFIG_NVME_TCP=m
CONFIG_NVME_TARGET=m
CONFIG_NVME_TARGET_PASSTHRU=y
CONFIG_NVME_TARGET_LOOP=m
CONFIG_NVME_TARGET_RDMA=m
CONFIG_NVME_TARGET_FC=m
CONFIG_NVME_TARGET_TCP=m
CONFIG_EEPROM_AT24=m
CONFIG_BLK_DEV_SD=y
CONFIG_BLK_DEV_SR=y
@@ -359,6 +375,7 @@ CONFIG_SCSI_QLA_FC=m
CONFIG_TCM_QLA2XXX=m
CONFIG_SCSI_QLA_ISCSI=m
CONFIG_SCSI_LPFC=m
CONFIG_SCSI_VIRTIO=m
CONFIG_ATA=y
CONFIG_SATA_AHCI=y
CONFIG_SATA_AHCI_PLATFORM=y
@@ -403,6 +420,7 @@ CONFIG_VXLAN=y
CONFIG_RIONET=m
CONFIG_TUN=m
CONFIG_VETH=m
CONFIG_VIRTIO_NET=m
# CONFIG_NET_VENDOR_3COM is not set
# CONFIG_NET_VENDOR_ADAPTEC is not set
# CONFIG_NET_VENDOR_AGERE is not set
@@ -527,10 +545,12 @@ CONFIG_SERIAL_8250_SHARE_IRQ=y
CONFIG_SERIAL_8250_RSA=y
CONFIG_SERIAL_NONSTANDARD=y
CONFIG_PRINTER=m
CONFIG_VIRTIO_CONSOLE=y
CONFIG_IPMI_HANDLER=m
CONFIG_IPMI_DEVICE_INTERFACE=m
CONFIG_IPMI_SI=m
CONFIG_HW_RANDOM=y
CONFIG_HW_RANDOM_VIRTIO=m
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_PIIX4=y
CONFIG_I2C_GPIO=y
@@ -568,6 +588,8 @@ CONFIG_DRM_AMDGPU_SI=y
CONFIG_DRM_AMDGPU_CIK=y
CONFIG_DRM_AMDGPU_USERPTR=y
CONFIG_DRM_AST=y
CONFIG_DRM_QXL=m
CONFIG_DRM_VIRTIO_GPU=m
CONFIG_FB=y
CONFIG_FB_EFI=y
CONFIG_FB_RADEON=y
@@ -637,7 +659,16 @@ CONFIG_UIO=m
CONFIG_UIO_PDRV_GENIRQ=m
CONFIG_UIO_DMEM_GENIRQ=m
CONFIG_UIO_PCI_GENERIC=m
# CONFIG_VIRTIO_MENU is not set
CONFIG_VFIO=m
CONFIG_VFIO_PCI=m
CONFIG_VIRTIO_PCI=y
CONFIG_VIRTIO_BALLOON=m
CONFIG_VIRTIO_INPUT=m
CONFIG_VIRTIO_MMIO=m
CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y
CONFIG_VHOST_NET=m
CONFIG_VHOST_SCSI=m
CONFIG_VHOST_VSOCK=m
CONFIG_COMEDI=m
CONFIG_COMEDI_PCI_DRIVERS=m
CONFIG_COMEDI_8255_PCI=m
@@ -762,6 +793,7 @@ CONFIG_CRYPTO_USER_API_HASH=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
CONFIG_CRYPTO_DEV_VIRTIO=m
CONFIG_PRINTK_TIME=y
CONFIG_STRIP_ASM_SYMS=y
CONFIG_MAGIC_SYSRQ=y
......
@@ -28,10 +28,10 @@ struct loongson_board_info {
struct loongson_system_configuration {
int nr_cpus;
int nr_nodes;
int nr_io_pics;
int boot_cpu_id;
int cores_per_node;
int cores_per_package;
unsigned long cores_io_master;
const char *cpuname;
};
......
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#ifndef __ASM_DMA_H
#define __ASM_DMA_H
#define MAX_DMA_ADDRESS PAGE_OFFSET
#define MAX_DMA32_PFN (1UL << (32 - PAGE_SHIFT))
#endif
@@ -23,12 +23,33 @@ enum reg1i20_op {
lu32id_op = 0x0b,
};
enum reg1i21_op {
beqz_op = 0x10,
bnez_op = 0x11,
};
enum reg2i12_op {
addiw_op = 0x0a,
addid_op = 0x0b,
lu52id_op = 0x0c,
ldb_op = 0xa0,
ldh_op = 0xa1,
ldw_op = 0xa2,
ldd_op = 0xa3,
stb_op = 0xa4,
sth_op = 0xa5,
stw_op = 0xa6,
std_op = 0xa7,
};
enum reg2i16_op {
jirl_op = 0x13,
beq_op = 0x16,
bne_op = 0x17,
blt_op = 0x18,
bge_op = 0x19,
bltu_op = 0x1a,
bgeu_op = 0x1b,
};
struct reg0i26_format {
@@ -110,6 +131,37 @@ enum loongarch_gpr {
LOONGARCH_GPR_MAX
};
#define is_imm12_negative(val) is_imm_negative(val, 12)
static inline bool is_imm_negative(unsigned long val, unsigned int bit)
{
return val & (1UL << (bit - 1));
}
static inline bool is_branch_ins(union loongarch_instruction *ip)
{
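/* The opcode field occupies the same bit positions in the reg1i21
 * (beqz/bnez) and reg2i16 (beq ... bgeu) encodings, so one comparison
 * covers both branch families. */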
return ip->reg1i21_format.opcode >= beqz_op &&
ip->reg1i21_format.opcode <= bgeu_op;
}
static inline bool is_ra_save_ins(union loongarch_instruction *ip)
{
/* st.d $ra, $sp, offset */
return ip->reg2i12_format.opcode == std_op &&
ip->reg2i12_format.rj == LOONGARCH_GPR_SP &&
ip->reg2i12_format.rd == LOONGARCH_GPR_RA &&
!is_imm12_negative(ip->reg2i12_format.immediate);
}
static inline bool is_stack_alloc_ins(union loongarch_instruction *ip)
{
/* addi.d $sp, $sp, -imm */
return ip->reg2i12_format.opcode == addid_op &&
ip->reg2i12_format.rj == LOONGARCH_GPR_SP &&
ip->reg2i12_format.rd == LOONGARCH_GPR_SP &&
is_imm12_negative(ip->reg2i12_format.immediate);
}
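/*
 * For illustration: a typical LoongArch function prologue that these
 * predicates match looks like
 *
 *     addi.d  $sp, $sp, -32    # is_stack_alloc_ins(): addid_op,
 *                              # rd == rj == $sp, negative imm12
 *     st.d    $ra, $sp, 24     # is_ra_save_ins(): std_op, rj == $sp,
 *                              # rd == $ra, non-negative imm12
 *
 * is_imm_negative() just tests the sign bit of the field: -32 encodes
 * as 0xfe0 in 12-bit two's complement, which has bit 11 set.
 */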
u32 larch_insn_gen_lu32id(enum loongarch_gpr rd, int imm);
u32 larch_insn_gen_lu52id(enum loongarch_gpr rd, enum loongarch_gpr rj, int imm);
u32 larch_insn_gen_jirl(enum loongarch_gpr rd, enum loongarch_gpr rj, unsigned long pc, unsigned long dest);
......
@@ -82,8 +82,6 @@ extern struct acpi_vector_group msi_group[MAX_IO_PICS];
#define GSI_MAX_PCH_IRQ (LOONGSON_PCH_IRQ_BASE + 256 - 1)
extern int find_pch_pic(u32 gsi);
extern int eiointc_get_node(int id);
struct acpi_madt_lio_pic;
struct acpi_madt_eio_pic;
struct acpi_madt_ht_pic;
@@ -100,16 +98,8 @@ struct irq_domain *htvec_acpi_init(struct irq_domain *parent,
struct acpi_madt_ht_pic *acpi_htvec);
int pch_lpc_acpi_init(struct irq_domain *parent,
struct acpi_madt_lpc_pic *acpi_pchlpc);
#if IS_ENABLED(CONFIG_LOONGSON_PCH_MSI)
int pch_msi_acpi_init(struct irq_domain *parent,
struct acpi_madt_msi_pic *acpi_pchmsi);
#else
static inline int pch_msi_acpi_init(struct irq_domain *parent,
struct acpi_madt_msi_pic *acpi_pchmsi)
{
return 0;
}
#endif
int pch_pic_acpi_init(struct irq_domain *parent,
struct acpi_madt_bio_pic *acpi_pchpic);
int find_pch_pic(u32 gsi);
......
@@ -33,8 +33,6 @@
#include <linux/kernel.h>
#include <linux/pfn.h>
#define MAX_DMA32_PFN (1UL << (32 - PAGE_SHIFT))
/*
* It's normally defined only for FLATMEM config but it's
* used in our early mem init code for all memory models.
......
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#ifndef _ASM_PCI_H
#define _ASM_PCI_H
#include <linux/ioport.h>
#include <linux/list.h>
#include <linux/types.h>
#include <asm/io.h>
#define PCIBIOS_MIN_IO 0x4000
#define PCIBIOS_MIN_MEM 0x20000000
#define PCIBIOS_MIN_CARDBUS_IO 0x4000
#define HAVE_PCI_MMAP
#define pcibios_assign_all_busses() 0
extern phys_addr_t mcfg_addr_init(int node);
/* generic pci stuff */
#include <asm-generic/pci.h>
#endif /* _ASM_PCI_H */
@@ -101,6 +101,10 @@ struct thread_struct {
unsigned long reg23, reg24, reg25, reg26; /* s0-s3 */
unsigned long reg27, reg28, reg29, reg30, reg31; /* s4-s8 */
/* __schedule() return address / call frame address */
unsigned long sched_ra;
unsigned long sched_cfa;
/* CSR registers */
unsigned long csr_prmd;
unsigned long csr_crmd;
@@ -129,6 +133,9 @@ struct thread_struct {
struct loongarch_fpu fpu FPU_ALIGN;
};
#define thread_saved_ra(tsk) (tsk->thread.sched_ra)
#define thread_saved_fp(tsk) (tsk->thread.sched_cfa)
#define INIT_THREAD { \
/* \
* Main processor registers \
@@ -145,6 +152,8 @@ struct thread_struct {
.reg29 = 0, \
.reg30 = 0, \
.reg31 = 0, \
.sched_ra = 0, \
.sched_cfa = 0, \
.csr_crmd = 0, \
.csr_prmd = 0, \
.csr_euen = 0, \
......
@@ -10,6 +10,26 @@
#include <asm/loongarch.h>
#include <linux/stringify.h>
enum stack_type {
STACK_TYPE_UNKNOWN,
STACK_TYPE_IRQ,
STACK_TYPE_TASK,
};
struct stack_info {
enum stack_type type;
unsigned long begin, end, next_sp;
};
struct stack_frame {
unsigned long fp;
unsigned long ra;
};
bool in_irq_stack(unsigned long stack, struct stack_info *info);
bool in_task_stack(unsigned long stack, struct task_struct *task, struct stack_info *info);
int get_stack_info(unsigned long stack, struct task_struct *task, struct stack_info *info);
#define STR_LONG_L __stringify(LONG_L)
#define STR_LONG_S __stringify(LONG_S)
#define STR_LONGSIZE __stringify(LONGSIZE)
......
@@ -15,12 +15,15 @@ struct task_struct;
* @prev: The task previously executed.
* @next: The task to begin executing.
* @next_ti: task_thread_info(next).
* @sched_ra: __schedule return address.
* @sched_cfa: __schedule call frame address.
*
* This function is used whilst scheduling to save the context of prev & load
* the context of next. Returns prev.
*/
extern asmlinkage struct task_struct *__switch_to(struct task_struct *prev,
struct task_struct *next, struct thread_info *next_ti);
struct task_struct *next, struct thread_info *next_ti,
void *sched_ra, void *sched_cfa);
/*
* For newly created kernel threads switch_to() will return to
@@ -28,10 +31,11 @@ extern asmlinkage struct task_struct *__switch_to(struct task_struct *prev,
* That is, everything following __switch_to() will be skipped for new threads.
* So everything that matters to new threads should be placed before __switch_to().
*/
#define switch_to(prev, next, last) \
do { \
lose_fpu_inatomic(1, prev); \
(last) = __switch_to(prev, next, task_thread_info(next)); \
#define switch_to(prev, next, last) \
do { \
lose_fpu_inatomic(1, prev); \
(last) = __switch_to(prev, next, task_thread_info(next), \
__builtin_return_address(0), __builtin_frame_address(0)); \
} while (0)
#endif /* _ASM_SWITCH_TO_H */
@@ -229,13 +229,13 @@ extern unsigned long __copy_user(void *to, const void *from, __kernel_size_t n);
static inline unsigned long __must_check
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
return __copy_user(to, from, n);
return __copy_user(to, (__force const void *)from, n);
}
static inline unsigned long __must_check
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
return __copy_user(to, from, n);
return __copy_user((__force void *)to, from, n);
}
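/*
 * Why __force: under sparse ("make C=1"), __user pointers live in a
 * separate address space, so casting one to a plain kernel pointer
 * warns unless the cast is marked intentional. A minimal sketch:
 *
 *     void __user *uptr;
 *     void *a = (void *)uptr;          // sparse warns: cast removes
 *                                      // the __user address space
 *     void *b = (__force void *)uptr;  // intentional, no warning
 *
 * __copy_user() takes plain pointers, hence the casts above.
 */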
#define INLINE_COPY_FROM_USER
......
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Most of these ideas come from x86.
*
* Copyright (C) 2022 Loongson Technology Corporation Limited
*/
#ifndef _ASM_UNWIND_H
#define _ASM_UNWIND_H
#include <linux/sched.h>
#include <asm/stacktrace.h>
enum unwinder_type {
UNWINDER_GUESS,
UNWINDER_PROLOGUE,
};
struct unwind_state {
char type; /* UNWINDER_XXX */
struct stack_info stack_info;
struct task_struct *task;
bool first, error;
unsigned long sp, pc, ra;
};
void unwind_start(struct unwind_state *state,
struct task_struct *task, struct pt_regs *regs);
bool unwind_next_frame(struct unwind_state *state);
unsigned long unwind_get_return_address(struct unwind_state *state);
static inline bool unwind_done(struct unwind_state *state)
{
return state->stack_info.type == STACK_TYPE_UNKNOWN;
}
static inline bool unwind_error(struct unwind_state *state)
{
return state->error;
}
#endif /* _ASM_UNWIND_H */
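A sketch of the intended call pattern for this API (the same loop arch_stack_walk() uses further below; "consume pc" stands in for whatever the caller does with each return address):

	struct unwind_state state;
	unsigned long pc;

	for (unwind_start(&state, task, regs);
	     !unwind_done(&state); unwind_next_frame(&state)) {
		pc = unwind_get_return_address(&state);
		if (!pc)
			break;
		/* consume pc, e.g. print_ip_sym(KERN_INFO, pc); */
	}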
@@ -7,6 +7,7 @@
#ifndef __ASM_VDSO_H
#define __ASM_VDSO_H
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <vdso/datapage.h>
......
@@ -8,6 +8,18 @@
#include <asm/asm.h>
#include <asm/page.h>
#include <asm/vdso.h>
struct vdso_pcpu_data {
u32 node;
} ____cacheline_aligned_in_smp;
struct loongarch_vdso_data {
struct vdso_pcpu_data pdata[NR_CPUS];
struct vdso_data data[CS_BASES]; /* Arch-independent data */
};
#define VDSO_DATA_SIZE PAGE_ALIGN(sizeof(struct loongarch_vdso_data))
static inline unsigned long get_vdso_base(void)
{
@@ -24,7 +36,8 @@ static inline unsigned long get_vdso_base(void)
static inline const struct vdso_data *get_vdso_data(void)
{
return (const struct vdso_data *)(get_vdso_base() - PAGE_SIZE);
return (const struct vdso_data *)(get_vdso_base()
- VDSO_DATA_SIZE + SMP_CACHE_BYTES * NR_CPUS);
}
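/*
 * Resulting vvar layout, for reference (each pdata slot occupies one
 * SMP_CACHE_BYTES because of ____cacheline_aligned_in_smp above):
 *
 *   base - VDSO_DATA_SIZE:                              pdata[NR_CPUS]
 *   base - VDSO_DATA_SIZE + SMP_CACHE_BYTES * NR_CPUS:  data[CS_BASES]
 *   base = get_vdso_base():                             vDSO code
 *
 * which is exactly the address get_vdso_data() computes.
 */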
#endif /* __ASSEMBLY__ */
@@ -15,6 +15,7 @@ obj-$(CONFIG_EFI) += efi.o
obj-$(CONFIG_CPU_HAS_FPU) += fpu.o
obj-$(CONFIG_MODULES) += module.o module-sections.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o
obj-$(CONFIG_PROC_FS) += proc.o
@@ -22,4 +23,7 @@ obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_NUMA) += numa.o
obj-$(CONFIG_UNWINDER_GUESS) += unwind_guess.o
obj-$(CONFIG_UNWINDER_PROLOGUE) += unwind_prologue.o
CPPFLAGS_vmlinux.lds := $(KBUILD_CFLAGS)
@@ -104,6 +104,39 @@ static int set_processor_mask(u32 id, u32 flags)
}
#endif
static int __init
acpi_parse_processor(union acpi_subtable_headers *header, const unsigned long end)
{
struct acpi_madt_core_pic *processor = NULL;
processor = (struct acpi_madt_core_pic *)header;
if (BAD_MADT_ENTRY(processor, end))
return -EINVAL;
acpi_table_print_madt_entry(&header->common);
#ifdef CONFIG_SMP
set_processor_mask(processor->core_id, processor->flags);
#endif
return 0;
}
static int __init
acpi_parse_eio_master(union acpi_subtable_headers *header, const unsigned long end)
{
static int core = 0;
struct acpi_madt_eio_pic *eiointc = NULL;
eiointc = (struct acpi_madt_eio_pic *)header;
if (BAD_MADT_ENTRY(eiointc, end))
return -EINVAL;
core = eiointc->node * CORES_PER_EIO_NODE;
set_bit(core, &(loongson_sysconf.cores_io_master));
return 0;
}
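/*
 * For example, taking CORES_PER_EIO_NODE as 4 (its value on current
 * Loongson-3 parts): an EIO PIC entry with eiointc->node == 1 sets
 * bit 4 of loongson_sysconf.cores_io_master, and io_master(4) in
 * smp.c (further below) then reports cpu 4 as an I/O master.
 */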
static void __init acpi_process_madt(void)
{
#ifdef CONFIG_SMP
@@ -114,6 +147,11 @@ static void __init acpi_process_madt(void)
__cpu_logical_map[i] = -1;
}
#endif
acpi_table_parse_madt(ACPI_MADT_TYPE_CORE_PIC,
acpi_parse_processor, MAX_CORE_PIC);
acpi_table_parse_madt(ACPI_MADT_TYPE_EIO_PIC,
acpi_parse_eio_master, MAX_IO_PICS);
loongson_sysconf.nr_cpus = num_processors;
}
......
@@ -103,6 +103,8 @@ void output_thread_defines(void)
OFFSET(THREAD_REG29, task_struct, thread.reg29);
OFFSET(THREAD_REG30, task_struct, thread.reg30);
OFFSET(THREAD_REG31, task_struct, thread.reg31);
OFFSET(THREAD_SCHED_RA, task_struct, thread.sched_ra);
OFFSET(THREAD_SCHED_CFA, task_struct, thread.sched_cfa);
OFFSET(THREAD_CSRCRMD, task_struct,
thread.csr_crmd);
OFFSET(THREAD_CSRPRMD, task_struct,
......
@@ -21,6 +21,12 @@ SYM_CODE_START(kernel_entry) # kernel entry point
csrwr t0, LOONGARCH_CSR_DMWIN0
li.d t0, CSR_DMW1_INIT # CA, PLV0, 0x9000 xxxx xxxx xxxx
csrwr t0, LOONGARCH_CSR_DMWIN1
/* We might not get launched at the address the kernel is linked to,
so we jump there. */
la.abs t0, 0f
jr t0
0:
/* Enable PG */
li.w t0, 0xb0 # PLV=0, IE=0, PG=1
csrwr t0, LOONGARCH_CSR_CRMD
@@ -29,11 +35,6 @@ SYM_CODE_START(kernel_entry) # kernel entry point
li.w t0, 0x00 # FPE=0, SXE=0, ASXE=0, BTE=0
csrwr t0, LOONGARCH_CSR_EUEN
/* We might not get launched at the address the kernel is linked to,
so we jump there. */
la.abs t0, 0f
jr t0
0:
la t0, __bss_start # clear .bss
st.d zero, t0, 0
la t1, __bss_stop - LONGSIZE
@@ -74,6 +75,11 @@ SYM_CODE_START(smpboot_entry)
csrwr t0, LOONGARCH_CSR_DMWIN0
li.d t0, CSR_DMW1_INIT # CA, PLV0
csrwr t0, LOONGARCH_CSR_DMWIN1
la.abs t0, 0f
jr t0
0:
/* Enable PG */
li.w t0, 0xb0 # PLV=0, IE=0, PG=1
csrwr t0, LOONGARCH_CSR_CRMD
li.w t0, 0x04 # PLV=0, PIE=1, PWE=0
@@ -85,9 +91,6 @@ SYM_CODE_START(smpboot_entry)
ld.d sp, t0, CPU_BOOT_STACK
ld.d tp, t0, CPU_BOOT_TINFO
la.abs t0, 0f
jr t0
0:
bl start_secondary
SYM_CODE_END(smpboot_entry)
......
@@ -106,7 +106,7 @@ static void *c_start(struct seq_file *m, loff_t *pos)
{
unsigned long i = *pos;
return i < NR_CPUS ? (void *)(i + 1) : NULL;
return i < nr_cpu_ids ? (void *)(i + 1) : NULL;
}
static void *c_next(struct seq_file *m, void *v, loff_t *pos)
......
@@ -44,6 +44,7 @@
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/reg.h>
#include <asm/unwind.h>
#include <asm/vdso.h>
/*
@@ -134,6 +135,7 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
childregs = (struct pt_regs *) childksp - 1;
/* Put the stack after the struct pt_regs. */
childksp = (unsigned long) childregs;
p->thread.sched_cfa = 0;
p->thread.csr_euen = 0;
p->thread.csr_crmd = csr_read32(LOONGARCH_CSR_CRMD);
p->thread.csr_prmd = csr_read32(LOONGARCH_CSR_PRMD);
@@ -144,6 +146,7 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
p->thread.reg23 = (unsigned long)args->fn;
p->thread.reg24 = (unsigned long)args->fn_arg;
p->thread.reg01 = (unsigned long)ret_from_kernel_thread;
p->thread.sched_ra = (unsigned long)ret_from_kernel_thread;
memset(childregs, 0, sizeof(struct pt_regs));
childregs->csr_euen = p->thread.csr_euen;
childregs->csr_crmd = p->thread.csr_crmd;
@@ -160,6 +163,7 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
p->thread.reg03 = (unsigned long) childregs;
p->thread.reg01 = (unsigned long) ret_from_fork;
p->thread.sched_ra = (unsigned long) ret_from_fork;
/*
* New tasks lose permission to use the fpu. This accelerates context
@@ -180,7 +184,91 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
unsigned long __get_wchan(struct task_struct *task)
{
return 0;
unsigned long pc = 0;
struct unwind_state state;
if (!try_get_task_stack(task))
return 0;
unwind_start(&state, task, NULL);
state.sp = thread_saved_fp(task);
get_stack_info(state.sp, state.task, &state.stack_info);
state.pc = thread_saved_ra(task);
#ifdef CONFIG_UNWINDER_PROLOGUE
state.type = UNWINDER_PROLOGUE;
#endif
for (; !unwind_done(&state); unwind_next_frame(&state)) {
pc = unwind_get_return_address(&state);
if (!pc)
break;
if (in_sched_functions(pc))
continue;
break;
}
put_task_stack(task);
return pc;
}
bool in_irq_stack(unsigned long stack, struct stack_info *info)
{
unsigned long nextsp;
unsigned long begin = (unsigned long)this_cpu_read(irq_stack);
unsigned long end = begin + IRQ_STACK_START;
if (stack < begin || stack >= end)
return false;
nextsp = *(unsigned long *)end;
if (nextsp & (SZREG - 1))
return false;
info->begin = begin;
info->end = end;
info->next_sp = nextsp;
info->type = STACK_TYPE_IRQ;
return true;
}
bool in_task_stack(unsigned long stack, struct task_struct *task,
struct stack_info *info)
{
unsigned long begin = (unsigned long)task_stack_page(task);
unsigned long end = begin + THREAD_SIZE - 32;
if (stack < begin || stack >= end)
return false;
info->begin = begin;
info->end = end;
info->next_sp = 0;
info->type = STACK_TYPE_TASK;
return true;
}
int get_stack_info(unsigned long stack, struct task_struct *task,
struct stack_info *info)
{
task = task ? : current;
if (!stack || stack & (SZREG - 1))
goto unknown;
if (in_task_stack(stack, task, info))
return 0;
if (task != current)
goto unknown;
if (in_irq_stack(stack, info))
return 0;
unknown:
info->type = STACK_TYPE_UNKNOWN;
return -EINVAL;
}
unsigned long stack_top(void)
......
@@ -242,10 +242,7 @@ void loongson3_smp_finish(void)
static bool io_master(int cpu)
{
if (cpu == 0)
return true;
return false;
return test_bit(cpu, &loongson_sysconf.cores_io_master);
}
int loongson3_cpu_disable(void)
......
// SPDX-License-Identifier: GPL-2.0
/*
* Stack trace management functions
*
* Copyright (C) 2022 Loongson Technology Corporation Limited
*/
#include <linux/sched.h>
#include <linux/stacktrace.h>
#include <linux/uaccess.h>
#include <asm/stacktrace.h>
#include <asm/unwind.h>
void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
struct task_struct *task, struct pt_regs *regs)
{
unsigned long addr;
struct pt_regs dummyregs;
struct unwind_state state;
regs = &dummyregs;
if (task == current) {
regs->regs[3] = (unsigned long)__builtin_frame_address(0);
regs->csr_era = (unsigned long)__builtin_return_address(0);
} else {
regs->regs[3] = thread_saved_fp(task);
regs->csr_era = thread_saved_ra(task);
}
regs->regs[1] = 0;
for (unwind_start(&state, task, regs);
!unwind_done(&state); unwind_next_frame(&state)) {
addr = unwind_get_return_address(&state);
if (!addr || !consume_entry(cookie, addr))
break;
}
}
static int
copy_stack_frame(unsigned long fp, struct stack_frame *frame)
{
int ret = 1;
unsigned long err;
unsigned long __user *user_frame_tail;
user_frame_tail = (unsigned long *)(fp - sizeof(struct stack_frame));
if (!access_ok(user_frame_tail, sizeof(*frame)))
return 0;
pagefault_disable();
err = (__copy_from_user_inatomic(frame, user_frame_tail, sizeof(*frame)));
if (err || (unsigned long)user_frame_tail >= frame->fp)
ret = 0;
pagefault_enable();
return ret;
}
void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
const struct pt_regs *regs)
{
unsigned long fp = regs->regs[22];
while (fp && !((unsigned long)fp & 0xf)) {
struct stack_frame frame;
frame.fp = 0;
frame.ra = 0;
if (!copy_stack_frame(fp, &frame))
break;
if (!frame.ra)
break;
if (!consume_entry(cookie, frame.ra))
break;
fp = frame.fp;
}
}
@@ -21,6 +21,8 @@ SYM_FUNC_START(__switch_to)
cpu_save_nonscratch a0
stptr.d ra, a0, THREAD_REG01
stptr.d a3, a0, THREAD_SCHED_RA
stptr.d a4, a0, THREAD_SCHED_CFA
move tp, a2
cpu_restore_nonscratch a1
......
@@ -135,7 +135,7 @@ static int get_timer_irq(void)
int constant_clockevent_init(void)
{
unsigned int irq;
int irq;
unsigned int cpu = smp_processor_id();
unsigned long min_delta = 0x600;
unsigned long max_delta = (1UL << 48) - 1;
......
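The one-line type change above matters because get_timer_irq() can fail: with an unsigned irq, the error check could never fire. A minimal sketch (the error handling shown here is abbreviated):

	int irq = get_timer_irq();	/* may return a negative errno */

	if (irq < 0)		/* with "unsigned int irq" this is always false: */
		return -EINVAL;	/* the errno would wrap to a huge positive value */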
@@ -43,6 +43,7 @@
#include <asm/stacktrace.h>
#include <asm/tlb.h>
#include <asm/types.h>
#include <asm/unwind.h>
#include "access-helper.h"
@@ -64,19 +65,20 @@ static void show_backtrace(struct task_struct *task, const struct pt_regs *regs,
const char *loglvl, bool user)
{
unsigned long addr;
unsigned long *sp = (unsigned long *)(regs->regs[3] & ~3);
struct unwind_state state;
struct pt_regs *pregs = (struct pt_regs *)regs;
if (!task)
task = current;
if (user_mode(regs))
state.type = UNWINDER_GUESS;
printk("%sCall Trace:", loglvl);
#ifdef CONFIG_KALLSYMS
printk("%s\n", loglvl);
#endif
while (!kstack_end(sp)) {
if (__get_addr(&addr, sp++, user)) {
printk("%s (Bad stack address)", loglvl);
break;
}
if (__kernel_text_address(addr))
print_ip_sym(loglvl, addr);
for (unwind_start(&state, task, pregs);
!unwind_done(&state); unwind_next_frame(&state)) {
addr = unwind_get_return_address(&state);
print_ip_sym(loglvl, addr);
}
printk("%s\n", loglvl);
}
......
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2022 Loongson Technology Corporation Limited
*/
#include <linux/kernel.h>
#include <asm/unwind.h>
unsigned long unwind_get_return_address(struct unwind_state *state)
{
if (unwind_done(state))
return 0;
else if (state->first)
return state->pc;
return *(unsigned long *)(state->sp);
}
EXPORT_SYMBOL_GPL(unwind_get_return_address);
void unwind_start(struct unwind_state *state, struct task_struct *task,
struct pt_regs *regs)
{
memset(state, 0, sizeof(*state));
if (regs) {
state->sp = regs->regs[3];
state->pc = regs->csr_era;
}
state->task = task;
state->first = true;
get_stack_info(state->sp, state->task, &state->stack_info);
if (!unwind_done(state) && !__kernel_text_address(state->pc))
unwind_next_frame(state);
}
EXPORT_SYMBOL_GPL(unwind_start);
bool unwind_next_frame(struct unwind_state *state)
{
struct stack_info *info = &state->stack_info;
unsigned long addr;
if (unwind_done(state))
return false;
if (state->first)
state->first = false;
do {
for (state->sp += sizeof(unsigned long);
state->sp < info->end;
state->sp += sizeof(unsigned long)) {
addr = *(unsigned long *)(state->sp);
if (__kernel_text_address(addr))
return true;
}
state->sp = info->next_sp;
} while (!get_stack_info(state->sp, state->task, info));
return false;
}
EXPORT_SYMBOL_GPL(unwind_next_frame);
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2022 Loongson Technology Corporation Limited
*/
#include <linux/kallsyms.h>
#include <asm/inst.h>
#include <asm/ptrace.h>
#include <asm/unwind.h>
unsigned long unwind_get_return_address(struct unwind_state *state)
{
if (unwind_done(state))
return 0;
else if (state->type)
return state->pc;
else if (state->first)
return state->pc;
return *(unsigned long *)(state->sp);
}
EXPORT_SYMBOL_GPL(unwind_get_return_address);
static bool unwind_by_guess(struct unwind_state *state)
{
struct stack_info *info = &state->stack_info;
unsigned long addr;
for (state->sp += sizeof(unsigned long);
state->sp < info->end;
state->sp += sizeof(unsigned long)) {
addr = *(unsigned long *)(state->sp);
if (__kernel_text_address(addr))
return true;
}
return false;
}
static bool unwind_by_prologue(struct unwind_state *state)
{
struct stack_info *info = &state->stack_info;
union loongarch_instruction *ip, *ip_end;
long frame_ra = -1;
unsigned long frame_size = 0;
unsigned long size, offset, pc = state->pc;
if (state->sp >= info->end || state->sp < info->begin)
return false;
if (!kallsyms_lookup_size_offset(pc, &size, &offset))
return false;
ip = (union loongarch_instruction *)(pc - offset);
ip_end = (union loongarch_instruction *)pc;
while (ip < ip_end) {
if (is_stack_alloc_ins(ip)) {
frame_size = (1 << 12) - ip->reg2i12_format.immediate;
ip++;
break;
}
ip++;
}
if (!frame_size) {
if (state->first)
goto first;
return false;
}
while (ip < ip_end) {
if (is_ra_save_ins(ip)) {
frame_ra = ip->reg2i12_format.immediate;
break;
}
if (is_branch_ins(ip))
break;
ip++;
}
if (frame_ra < 0) {
if (state->first) {
state->sp = state->sp + frame_size;
goto first;
}
return false;
}
if (state->first)
state->first = false;
state->pc = *(unsigned long *)(state->sp + frame_ra);
state->sp = state->sp + frame_size;
return !!__kernel_text_address(state->pc);
first:
state->first = false;
if (state->pc == state->ra)
return false;
state->pc = state->ra;
return !!__kernel_text_address(state->ra);
}
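/*
 * Worked example: for a function whose prologue is
 *
 *     addi.d  $sp, $sp, -48    # imm12 encodes -48 as 4048 (0xfd0), so
 *                              # frame_size = (1 << 12) - 4048 = 48
 *     st.d    $ra, $sp, 40     # frame_ra = 40
 *
 * the saved return address is *(sp + 40) and the caller's stack pointer
 * is sp + 48, which is how unwind_by_prologue() steps to the next frame.
 */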
void unwind_start(struct unwind_state *state, struct task_struct *task,
struct pt_regs *regs)
{
memset(state, 0, sizeof(*state));
if (regs && __kernel_text_address(regs->csr_era)) {
state->pc = regs->csr_era;
state->sp = regs->regs[3];
state->ra = regs->regs[1];
state->type = UNWINDER_PROLOGUE;
}
state->task = task;
state->first = true;
get_stack_info(state->sp, state->task, &state->stack_info);
if (!unwind_done(state) && !__kernel_text_address(state->pc))
unwind_next_frame(state);
}
EXPORT_SYMBOL_GPL(unwind_start);
bool unwind_next_frame(struct unwind_state *state)
{
struct stack_info *info = &state->stack_info;
struct pt_regs *regs;
unsigned long pc;
if (unwind_done(state))
return false;
do {
switch (state->type) {
case UNWINDER_GUESS:
state->first = false;
if (unwind_by_guess(state))
return true;
break;
case UNWINDER_PROLOGUE:
if (unwind_by_prologue(state))
return true;
if (info->type == STACK_TYPE_IRQ &&
info->end == state->sp) {
regs = (struct pt_regs *)info->next_sp;
pc = regs->csr_era;
if (user_mode(regs) || !__kernel_text_address(pc))
return false;
state->pc = pc;
state->sp = regs->regs[3];
state->ra = regs->regs[1];
state->first = true;
get_stack_info(state->sp, state->task, info);
return true;
}
}
state->sp = info->next_sp;
} while (!get_stack_info(state->sp, state->task, info));
return false;
}
EXPORT_SYMBOL_GPL(unwind_next_frame);
@@ -25,12 +25,14 @@
extern char vdso_start[], vdso_end[];
/* Kernel-provided data used by the VDSO. */
static union loongarch_vdso_data {
u8 page[PAGE_SIZE];
struct vdso_data data[CS_BASES];
static union {
u8 page[VDSO_DATA_SIZE];
struct loongarch_vdso_data vdata;
} loongarch_vdso_data __page_aligned_data;
struct vdso_data *vdso_data = loongarch_vdso_data.data;
static struct page *vdso_pages[] = { NULL };
struct vdso_data *vdso_data = loongarch_vdso_data.vdata.data;
struct vdso_pcpu_data *vdso_pdata = loongarch_vdso_data.vdata.pdata;
static int vdso_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma)
{
@@ -55,11 +57,14 @@ struct loongarch_vdso_info vdso_info = {
static int __init init_vdso(void)
{
unsigned long i, pfn;
unsigned long i, cpu, pfn;
BUG_ON(!PAGE_ALIGNED(vdso_info.vdso));
BUG_ON(!PAGE_ALIGNED(vdso_info.size));
for_each_possible_cpu(cpu)
vdso_pdata[cpu].node = cpu_to_node(cpu);
pfn = __phys_to_pfn(__pa_symbol(vdso_info.vdso));
for (i = 0; i < vdso_info.size / PAGE_SIZE; i++)
vdso_info.code_mapping.pages[i] = pfn_to_page(pfn + i);
@@ -93,9 +98,9 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
/*
* Determine total area size. This includes the VDSO data itself
* and the data page.
* and the data pages.
*/
vvar_size = PAGE_SIZE;
vvar_size = VDSO_DATA_SIZE;
size = vvar_size + info->size;
data_addr = get_unmapped_area(NULL, vdso_base(), size, 0, 0);
@@ -103,7 +108,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
ret = data_addr;
goto out;
}
vdso_addr = data_addr + PAGE_SIZE;
vdso_addr = data_addr + VDSO_DATA_SIZE;
vma = _install_special_mapping(mm, data_addr, vvar_size,
VM_READ | VM_MAYREAD,
@@ -115,8 +120,8 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
/* Map VDSO data page. */
ret = remap_pfn_range(vma, data_addr,
virt_to_phys(vdso_data) >> PAGE_SHIFT,
PAGE_SIZE, PAGE_READONLY);
virt_to_phys(&loongarch_vdso_data) >> PAGE_SHIFT,
vvar_size, PAGE_READONLY);
if (ret)
goto out;
......
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/pci-acpi.h>
#include <linux/pci-ecam.h>
#include <asm/pci.h>
#include <asm/numa.h>
#include <asm/loongson.h>
struct pci_root_info {
struct acpi_pci_root_info common;
struct pci_config_window *cfg;
};
void pcibios_add_bus(struct pci_bus *bus)
{
acpi_pci_add_bus(bus);
}
int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
{
struct pci_config_window *cfg = bridge->bus->sysdata;
struct acpi_device *adev = to_acpi_device(cfg->parent);
struct device *bus_dev = &bridge->bus->dev;
ACPI_COMPANION_SET(&bridge->dev, adev);
set_dev_node(bus_dev, pa_to_nid(cfg->res.start));
return 0;
}
int acpi_pci_bus_find_domain_nr(struct pci_bus *bus)
{
struct pci_config_window *cfg = bus->sysdata;
struct acpi_device *adev = to_acpi_device(cfg->parent);
struct acpi_pci_root *root = acpi_driver_data(adev);
return root->segment;
}
static void acpi_release_root_info(struct acpi_pci_root_info *ci)
{
struct pci_root_info *info;
info = container_of(ci, struct pci_root_info, common);
pci_ecam_free(info->cfg);
kfree(ci->ops);
kfree(info);
}
static int acpi_prepare_root_resources(struct acpi_pci_root_info *ci)
{
int status;
struct resource_entry *entry, *tmp;
struct acpi_device *device = ci->bridge;
status = acpi_pci_probe_root_resources(ci);
if (status > 0) {
resource_list_for_each_entry_safe(entry, tmp, &ci->resources) {
if (entry->res->flags & IORESOURCE_MEM) {
entry->offset = ci->root->mcfg_addr & GENMASK_ULL(63, 40);
entry->res->start |= entry->offset;
entry->res->end |= entry->offset;
}
}
return status;
}
resource_list_for_each_entry_safe(entry, tmp, &ci->resources) {
dev_dbg(&device->dev,
"host bridge window %pR (ignored)\n", entry->res);
resource_list_destroy_entry(entry);
}
return 0;
}
/*
* Lookup the bus range for the domain in MCFG, and set up config space
* mapping.
*/
static struct pci_config_window *
pci_acpi_setup_ecam_mapping(struct acpi_pci_root *root)
{
int ret, bus_shift;
u16 seg = root->segment;
struct device *dev = &root->device->dev;
struct resource cfgres;
struct resource *bus_res = &root->secondary;
struct pci_config_window *cfg;
const struct pci_ecam_ops *ecam_ops;
ret = pci_mcfg_lookup(root, &cfgres, &ecam_ops);
if (ret < 0) {
dev_err(dev, "%04x:%pR ECAM region not found, use default value\n", seg, bus_res);
ecam_ops = &loongson_pci_ecam_ops;
root->mcfg_addr = mcfg_addr_init(0);
}
bus_shift = ecam_ops->bus_shift ? : 20;
cfgres.start = root->mcfg_addr + (bus_res->start << bus_shift);
cfgres.end = cfgres.start + (resource_size(bus_res) << bus_shift) - 1;
cfgres.flags = IORESOURCE_MEM;
cfg = pci_ecam_create(dev, &cfgres, bus_res, ecam_ops);
if (IS_ERR(cfg)) {
dev_err(dev, "%04x:%pR error %ld mapping ECAM\n", seg, bus_res, PTR_ERR(cfg));
return NULL;
}
return cfg;
}
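/*
 * Worked example (assuming loongson_pci_ecam_ops keeps its bus_shift of
 * 16): a secondary bus range of [0x00-0xff] maps 256 << 16 bytes, i.e.
 * a 16 MiB window starting at root->mcfg_addr, 64 KiB per bus.
 */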
struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
{
struct pci_bus *bus;
struct pci_root_info *info;
struct acpi_pci_root_ops *root_ops;
int domain = root->segment;
int busnum = root->secondary.start;
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info) {
pr_warn("pci_bus %04x:%02x: ignored (out of memory)\n", domain, busnum);
return NULL;
}
root_ops = kzalloc(sizeof(*root_ops), GFP_KERNEL);
if (!root_ops) {
kfree(info);
return NULL;
}
info->cfg = pci_acpi_setup_ecam_mapping(root);
if (!info->cfg) {
kfree(info);
kfree(root_ops);
return NULL;
}
root_ops->release_info = acpi_release_root_info;
root_ops->prepare_resources = acpi_prepare_root_resources;
root_ops->pci_ops = (struct pci_ops *)&info->cfg->ops->pci_ops;
bus = pci_find_bus(domain, busnum);
if (bus) {
memcpy(bus->sysdata, info->cfg, sizeof(struct pci_config_window));
kfree(info);
} else {
struct pci_bus *child;
bus = acpi_pci_root_create(root, root_ops,
&info->common, info->cfg);
if (!bus) {
kfree(info);
kfree(root_ops);
return NULL;
}
pci_bus_size_bridges(bus);
pci_bus_assign_resources(bus);
list_for_each_entry(child, &bus->children, node)
pcie_bus_configure_settings(child);
}
return bus;
}
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/vgaarb.h>
#include <asm/loongson.h>
#define PCI_DEVICE_ID_LOONGSON_HOST 0x7a00
#define PCI_DEVICE_ID_LOONGSON_DC1 0x7a06
#define PCI_DEVICE_ID_LOONGSON_DC2 0x7a36
int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
int reg, int len, u32 *val)
{
struct pci_bus *bus_tmp = pci_find_bus(domain, bus);
if (bus_tmp)
return bus_tmp->ops->read(bus_tmp, devfn, reg, len, val);
return -EINVAL;
}
int raw_pci_write(unsigned int domain, unsigned int bus, unsigned int devfn,
int reg, int len, u32 val)
{
struct pci_bus *bus_tmp = pci_find_bus(domain, bus);
if (bus_tmp)
return bus_tmp->ops->write(bus_tmp, devfn, reg, len, val);
return -EINVAL;
}
phys_addr_t mcfg_addr_init(int node)
{
return (((u64)node << 44) | MCFG_EXT_PCICFG_BASE);
}
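/*
 * For example, mcfg_addr_init(1) == (1ULL << 44) | MCFG_EXT_PCICFG_BASE:
 * the node id selects a per-node configuration window in the high
 * address bits above the shared extended-config base.
 */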
static int __init pcibios_init(void)
{
unsigned int lsize;
/*
* Set PCI cacheline size to that of the highest level in the
* cache hierarchy.
*/
lsize = cpu_dcache_line_size();
lsize = cpu_vcache_line_size() ? : lsize;
lsize = cpu_scache_line_size() ? : lsize;
BUG_ON(!lsize);
pci_dfl_cache_line_size = lsize >> 2;
pr_debug("PCI: pci_cache_line_size set to %d bytes\n", lsize);
return 0;
}
subsys_initcall(pcibios_init);
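/*
 * Example: with 64-byte cache lines, pci_dfl_cache_line_size becomes
 * 64 >> 2 == 16, because the PCI cache-line-size register counts
 * 32-bit words rather than bytes.
 */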
int pcibios_device_add(struct pci_dev *dev)
{
int id;
struct irq_domain *dom;
id = pci_domain_nr(dev->bus);
dom = irq_find_matching_fwnode(get_pch_msi_handle(id), DOMAIN_BUS_PCI_MSI);
dev_set_msi_domain(&dev->dev, dom);
return 0;
}
int pcibios_alloc_irq(struct pci_dev *dev)
{
if (acpi_disabled)
return 0;
if (pci_dev_msi_enabled(dev))
return 0;
return acpi_pci_irq_enable(dev);
}
static void pci_fixup_vgadev(struct pci_dev *pdev)
{
struct pci_dev *devp = NULL;
while ((devp = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, devp))) {
if (devp->vendor != PCI_VENDOR_ID_LOONGSON) {
vga_set_default_device(devp);
dev_info(&pdev->dev,
"Overriding boot device as %X:%X\n",
devp->vendor, devp->device);
}
}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON, PCI_DEVICE_ID_LOONGSON_DC1, pci_fixup_vgadev);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON, PCI_DEVICE_ID_LOONGSON_DC2, pci_fixup_vgadev);
@@ -6,7 +6,7 @@
ARCH_REL_TYPE_ABS := R_LARCH_32|R_LARCH_64|R_LARCH_MARK_LA|R_LARCH_JUMP_SLOT
include $(srctree)/lib/vdso/Makefile
obj-vdso-y := elf.o vgettimeofday.o sigreturn.o
obj-vdso-y := elf.o vgetcpu.o vgettimeofday.o sigreturn.o
# Common compiler flags between ABIs.
ccflags-vdso := \
......
@@ -58,6 +58,7 @@ VERSION
{
LINUX_5.10 {
global:
__vdso_getcpu;
__vdso_clock_getres;
__vdso_clock_gettime;
__vdso_gettimeofday;
......
// SPDX-License-Identifier: GPL-2.0-only
/*
* Fast user context implementation of getcpu()
*/
#include <asm/vdso.h>
#include <linux/getcpu.h>
static __always_inline int read_cpu_id(void)
{
int cpu_id;
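/*
 * rdtime.d rd, rj reads the stable counter into rd and the counter ID
 * into rj; LoongArch platforms program each core's counter ID to its
 * CPU number, so discarding the counter in $zero leaves the CPU id
 * in %0.
 */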
__asm__ __volatile__(
" rdtime.d $zero, %0\n"
: "=r" (cpu_id)
:
: "memory");
return cpu_id;
}
static __always_inline const struct vdso_pcpu_data *get_pcpu_data(void)
{
return (struct vdso_pcpu_data *)(get_vdso_base() - VDSO_DATA_SIZE);
}
int __vdso_getcpu(unsigned int *cpu, unsigned int *node, struct getcpu_cache *unused)
{
int cpu_id;
const struct vdso_pcpu_data *data;
cpu_id = read_cpu_id();
if (cpu)
*cpu = cpu_id;
if (node) {
data = get_pcpu_data();
*node = data[cpu_id].node;
}
return 0;
}
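For completeness, a userspace sketch of calling the new symbol directly (resolving the vDSO through glibc's handle on "linux-vdso.so.1" is an assumption about the C library; portable code would simply use sched_getcpu() or getcpu(2)):

	#include <dlfcn.h>
	#include <stdio.h>

	typedef int (*vdso_getcpu_t)(unsigned int *, unsigned int *, void *);

	int main(void)
	{
		void *vdso = dlopen("linux-vdso.so.1", RTLD_LAZY | RTLD_NOLOAD);
		vdso_getcpu_t vgetcpu = vdso ? (vdso_getcpu_t)dlsym(vdso, "__vdso_getcpu") : NULL;
		unsigned int cpu = 0, node = 0;

		if (vgetcpu && !vgetcpu(&cpu, &node, NULL))
			printf("cpu %u node %u\n", cpu, node);
		return 0;	/* link with -ldl on older glibc */
	}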