Commit 948752d2 authored by Linus Torvalds

Merge tag 'riscv-for-linus-6.11-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux

Pull RISC-V fixes from Palmer Dabbelt:

 - A fix to avoid dropping some of the internal pseudo-extensions, which
   breaks *envcfg dependency parsing

 - The kernel entry address is now aligned in purgatory, which avoids a
   misaligned load that can lead to a crash on systems that don't
   support misaligned accesses early in boot

 - The FW_SFENCE_VMA_RECEIVED perf event was duplicated in a handful of
   perf JSON configurations, one of them has been updated to
   FW_SFENCE_VMA_ASID_SENT

 - The starfive cache driver is now restricted to 64-bit systems, as it
   isn't 32-bit clean

 - A fix to avoid aliasing legacy-mode perf counters with software perf
   counters

 - VM_FAULT_SIGSEGV is now handled in the page fault code

 - A fix for stalls during CPU hotplug due to IPIs being disabled

 - A fix for memblock bounds checking. This manifests as a crash on
   systems with discontinuous memory maps that have regions that don't
   fit in the linear map

* tag 'riscv-for-linus-6.11-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux:
  riscv: Fix linear mapping checks for non-contiguous memory regions
  RISC-V: Enable the IPI before workqueue_online_cpu()
  riscv/mm: Add handling for VM_FAULT_SIGSEGV in mm_fault_error()
  perf: riscv: Fix selecting counters in legacy mode
  cache: StarFive: Require a 64-bit system
  perf arch events: Fix duplicate RISC-V SBI firmware event name
  riscv/purgatory: align riscv_kernel_entry
  riscv: cpufeature: Do not drop Linux-internal extensions
parents 66242ef2 3b656442
@@ -432,28 +432,26 @@ static void __init riscv_resolve_isa(unsigned long *source_isa,
 		bitmap_copy(prev_resolved_isa, resolved_isa, RISCV_ISA_EXT_MAX);
 		for_each_set_bit(bit, source_isa, RISCV_ISA_EXT_MAX) {
 			ext = riscv_get_isa_ext_data(bit);
-			if (!ext)
-				continue;
 
-			if (ext->validate) {
+			if (ext && ext->validate) {
 				ret = ext->validate(ext, resolved_isa);
 				if (ret == -EPROBE_DEFER) {
 					loop = true;
 					continue;
 				} else if (ret) {
 					/* Disable the extension entirely */
-					clear_bit(ext->id, source_isa);
+					clear_bit(bit, source_isa);
 					continue;
 				}
 			}
 
-			set_bit(ext->id, resolved_isa);
+			set_bit(bit, resolved_isa);
 			/* No need to keep it in source isa now that it is enabled */
-			clear_bit(ext->id, source_isa);
+			clear_bit(bit, source_isa);
 
 			/* Single letter extensions get set in hwcap */
-			if (ext->id < RISCV_ISA_EXT_BASE)
-				*this_hwcap |= isa2hwcap[ext->id];
+			if (bit < RISCV_ISA_EXT_BASE)
+				*this_hwcap |= isa2hwcap[bit];
 		}
 	} while (loop && memcmp(prev_resolved_isa, resolved_isa, sizeof(prev_resolved_isa)));
 }
...
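The crux of this hunk: riscv_get_isa_ext_data() returns NULL for Linux-internal pseudo-extensions, so the old `if (!ext) continue;` dropped them from resolved_isa before the *envcfg dependency checks could see them. Keying every set/clear on `bit` rather than `ext->id` keeps such bits alive. A minimal userspace sketch of the loop's behaviour (hypothetical stand-ins, not kernel code):

/* Sketch: a set bit without metadata must still reach resolved_isa. */
#include <stdbool.h>
#include <stdio.h>

#define EXT_MAX 8

struct ext_data { int id; bool (*validate)(void); };

static struct ext_data ext0 = { .id = 0 };	/* only bit 0 has metadata */

static struct ext_data *get_ext_data(int bit)
{
	return bit == 0 ? &ext0 : NULL;	/* NULL: internal pseudo-extension */
}

int main(void)
{
	bool source_isa[EXT_MAX]   = { [0] = true, [5] = true };
	bool resolved_isa[EXT_MAX] = { false };

	for (int bit = 0; bit < EXT_MAX; bit++) {
		struct ext_data *ext;

		if (!source_isa[bit])
			continue;
		ext = get_ext_data(bit);
		/* Old code did `if (!ext) continue;` and silently lost bit 5. */
		if (ext && ext->validate && !ext->validate())
			continue;
		resolved_isa[bit] = true;	/* keyed on bit, not ext->id */
	}

	printf("pseudo-extension bit 5 resolved: %d\n", resolved_isa[5]); /* 1 */
	return 0;
}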
@@ -71,7 +71,7 @@ void __init sbi_ipi_init(void)
 	 * the masking/unmasking of virtual IPIs is done
 	 * via generic IPI-Mux
 	 */
-	cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
+	cpuhp_setup_state(CPUHP_AP_IRQ_RISCV_SBI_IPI_STARTING,
 			  "irqchip/sbi-ipi:starting",
 			  sbi_ipi_starting_cpu, NULL);
...
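For context on the state chosen above: cpuhp callbacks fire in ascending enum cpuhp_state order while a CPU comes online, so a dedicated entry in the STARTING section (added to enum cpuhp_state in a hunk further down) runs before workqueue_online_cpu(), whereas the old CPUHP_AP_ONLINE_DYN slot was only reached near the end of onlining, leaving the IPI masked while earlier callbacks could already be blocked waiting on one. A toy ordering sketch with made-up state names:

/* Toy model: online callbacks run in ascending enum order. */
#include <stdio.h>

enum toy_state {	/* made-up names mirroring the real ordering */
	TOY_IRQ_SBI_IPI_STARTING,	/* new: IPI enabled here */
	TOY_WORKQUEUE_ONLINE,		/* may wait on IPIs to the new CPU */
	TOY_ONLINE_DYN,			/* old: IPI was only enabled here */
	TOY_NR_STATES
};

static const char *const toy_name[TOY_NR_STATES] = {
	"enable SBI IPI (fixed placement)",
	"workqueue_online_cpu()",
	"dynamic states (previous placement)",
};

int main(void)
{
	for (int s = 0; s < TOY_NR_STATES; s++)
		printf("step %d: %s\n", s, toy_name[s]);
	return 0;
}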
@@ -61,26 +61,27 @@ static inline void no_context(struct pt_regs *regs, unsigned long addr)
 static inline void mm_fault_error(struct pt_regs *regs, unsigned long addr, vm_fault_t fault)
 {
+	if (!user_mode(regs)) {
+		no_context(regs, addr);
+		return;
+	}
+
 	if (fault & VM_FAULT_OOM) {
 		/*
 		 * We ran out of memory, call the OOM killer, and return the userspace
 		 * (which will retry the fault, or kill us if we got oom-killed).
 		 */
-		if (!user_mode(regs)) {
-			no_context(regs, addr);
-			return;
-		}
 		pagefault_out_of_memory();
 		return;
 	} else if (fault & (VM_FAULT_SIGBUS | VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE)) {
 		/* Kernel mode? Handle exceptions or die */
-		if (!user_mode(regs)) {
-			no_context(regs, addr);
-			return;
-		}
 		do_trap(regs, SIGBUS, BUS_ADRERR, addr);
 		return;
+	} else if (fault & VM_FAULT_SIGSEGV) {
+		do_trap(regs, SIGSEGV, SEGV_MAPERR, addr);
+		return;
 	}
+
 	BUG();
 }
...
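A coarse userspace spot-check of the user-visible contract after this change: a faulting user access must end in a signal with a meaningful si_code rather than falling through to BUG(). Note this exercises the ordinary bad-area path; handle_mm_fault() returns VM_FAULT_SIGSEGV only in rarer corner cases, so treat it as a sanity check, not a targeted regression test for this exact path:

#include <signal.h>
#include <string.h>
#include <unistd.h>

static void handler(int sig, siginfo_t *info, void *ctx)
{
	(void)sig; (void)ctx;
	/* Async-signal-safe: report si_code through the exit status. */
	_exit(info->si_code == SEGV_MAPERR ? 0 : 1);
}

int main(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = handler;
	sa.sa_flags = SA_SIGINFO;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGSEGV, &sa, NULL);

	*(volatile int *)8 = 1;	/* unmapped address: expect SIGSEGV/SEGV_MAPERR */
	return 2;		/* not reached */
}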
@@ -234,8 +234,6 @@ static void __init setup_bootmem(void)
 	 */
 	memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start);
 
-	phys_ram_end = memblock_end_of_DRAM();
-
 	/*
 	 * Make sure we align the start of the memory on a PMD boundary so that
 	 * at worst, we map the linear mapping with PMD mappings.
@@ -250,6 +248,16 @@ static void __init setup_bootmem(void)
 	if (IS_ENABLED(CONFIG_64BIT) && IS_ENABLED(CONFIG_MMU))
 		kernel_map.va_pa_offset = PAGE_OFFSET - phys_ram_base;
 
+	/*
+	 * The size of the linear page mapping may restrict the amount of
+	 * usable RAM.
+	 */
+	if (IS_ENABLED(CONFIG_64BIT)) {
+		max_mapped_addr = __pa(PAGE_OFFSET) + KERN_VIRT_SIZE;
+		memblock_cap_memory_range(phys_ram_base,
+					  max_mapped_addr - phys_ram_base);
+	}
+
 	/*
 	 * Reserve physical address space that would be mapped to virtual
 	 * addresses greater than (void *)(-PAGE_SIZE) because:
@@ -266,6 +274,7 @@ static void __init setup_bootmem(void)
 		memblock_reserve(max_mapped_addr, (phys_addr_t)-max_mapped_addr);
 	}
 
+	phys_ram_end = memblock_end_of_DRAM();
 	min_low_pfn = PFN_UP(phys_ram_base);
 	max_low_pfn = max_pfn = PFN_DOWN(phys_ram_end);
 	high_memory = (void *)(__va(PFN_PHYS(max_low_pfn)));
@@ -1284,8 +1293,6 @@ static void __init create_linear_mapping_page_table(void)
 		if (start <= __pa(PAGE_OFFSET) &&
 		    __pa(PAGE_OFFSET) < end)
 			start = __pa(PAGE_OFFSET);
-		if (end >= __pa(PAGE_OFFSET) + memory_limit)
-			end = __pa(PAGE_OFFSET) + memory_limit;
 
 		create_linear_mapping_range(start, end, 0, NULL);
 	}
...
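The arithmetic behind the new cap, assuming __pa(PAGE_OFFSET) == phys_ram_base (which the va_pa_offset setup above establishes): only the window [phys_ram_base, phys_ram_base + KERN_VIRT_SIZE) has linear-map virtual addresses, so memblock regions beyond it must be trimmed before phys_ram_end is sampled; that is also why the memblock_end_of_DRAM() read moved below the cap. A back-of-the-envelope sketch with made-up example values:

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	/* Made-up example values, not the real sv39/sv48 constants. */
	uint64_t phys_ram_base  = 0x80000000ull;	/* DRAM start */
	uint64_t kern_virt_size = 64ull << 30;		/* 64 GiB linear window */
	uint64_t region_end     = 0x2080000000ull;	/* a high, sparse bank */

	uint64_t max_mapped = phys_ram_base + kern_virt_size;
	uint64_t capped_end = region_end < max_mapped ? region_end : max_mapped;

	printf("linear map covers [%#" PRIx64 ", %#" PRIx64 ")\n",
	       phys_ram_base, max_mapped);
	printf("DRAM end after cap: %#" PRIx64 "\n", capped_end);
	return 0;
}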
@@ -7,6 +7,7 @@
  * Author: Li Zhengyu (lizhengyu3@huawei.com)
  *
  */
+#include <asm/asm.h>
 #include <linux/linkage.h>
 
 .text
@@ -34,6 +35,7 @@ SYM_CODE_END(purgatory_start)
 
 .data
 
+.align LGREG
 SYM_DATA(riscv_kernel_entry, .quad 0)
 
 .end
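Why the alignment matters: .quad emits an 8-byte value but does not by itself align the symbol, and LGREG (from asm/asm.h, log2 of the register size) makes the directive .align 3 on rv64, i.e. 8-byte alignment, so the load of riscv_kernel_entry in purgatory can never be misaligned. On cores without hardware misaligned-access support, and with no handler to emulate the access that early in boot, such a load simply traps. An illustrative C analogue of safe versus unsafe access (not purgatory code):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint8_t buf[16] = { 0 };
	uint8_t *p = buf + 1;			/* deliberately not 8-byte aligned */
	uint64_t entry = 0x80200000ull;		/* stand-in kernel entry address */
	uint64_t out;

	memcpy(p, &entry, sizeof(entry));	/* byte-wise store: always safe */
	memcpy(&out, p, sizeof(out));		/* byte-wise load: always safe */
	printf("%#llx\n", (unsigned long long)out);

	/* *(uint64_t *)p would be a misaligned ld on RISC-V and may trap. */
	return 0;
}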
@@ -18,6 +18,7 @@ config STARFIVE_STARLINK_CACHE
 	bool "StarFive StarLink Cache controller"
 	depends on RISCV
 	depends on ARCH_STARFIVE
+	depends on 64BIT
 	select RISCV_DMA_NONCOHERENT
 	select RISCV_NONSTANDARD_CACHE_OPS
 	help
...
@@ -416,7 +416,7 @@ static int pmu_sbi_ctr_get_idx(struct perf_event *event)
 	 * but not in the user access mode as we want to use the other counters
 	 * that support sampling/filtering.
 	 */
-	if (hwc->flags & PERF_EVENT_FLAG_LEGACY) {
+	if ((hwc->flags & PERF_EVENT_FLAG_LEGACY) && (event->attr.type == PERF_TYPE_HARDWARE)) {
 		if (event->attr.config == PERF_COUNT_HW_CPU_CYCLES) {
 			cflags |= SBI_PMU_CFG_FLAG_SKIP_MATCH;
 			cmask = 1;
...
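The extra event->attr.type == PERF_TYPE_HARDWARE test matters because attr.config values are only meaningful relative to attr.type: config 0 is PERF_COUNT_HW_CPU_CYCLES for hardware events but PERF_COUNT_SW_CPU_CLOCK for software events, so without the type check a legacy-flagged event whose config happened to collide could be steered onto the fixed cycle counter. An illustrative userspace counterpart using perf_event_open(2) (Linux-only; may need a lowered perf_event_paranoid setting to succeed):

#include <linux/perf_event.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int perf_open(__u32 type, __u64 config)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = type;
	attr.config = config;
	attr.disabled = 1;
	return (int)syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
}

int main(void)
{
	/* Both configs are 0; only the hardware event means "CPU cycles". */
	int hw = perf_open(PERF_TYPE_HARDWARE, PERF_COUNT_HW_CPU_CYCLES);
	int sw = perf_open(PERF_TYPE_SOFTWARE, PERF_COUNT_SW_CPU_CLOCK);

	printf("hardware fd=%d software fd=%d\n", hw, sw);
	return 0;
}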
@@ -147,6 +147,7 @@ enum cpuhp_state {
 	CPUHP_AP_IRQ_LOONGARCH_STARTING,
 	CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING,
 	CPUHP_AP_IRQ_RISCV_IMSIC_STARTING,
+	CPUHP_AP_IRQ_RISCV_SBI_IPI_STARTING,
 	CPUHP_AP_ARM_MVEBU_COHERENCY,
 	CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING,
 	CPUHP_AP_PERF_X86_STARTING,
...
@@ -36,7 +36,7 @@
 		"ArchStdEvent": "FW_SFENCE_VMA_RECEIVED"
 	},
 	{
-		"ArchStdEvent": "FW_SFENCE_VMA_RECEIVED"
+		"ArchStdEvent": "FW_SFENCE_VMA_ASID_SENT"
 	},
 	{
 		"ArchStdEvent": "FW_SFENCE_VMA_ASID_RECEIVED"
...
@@ -74,7 +74,7 @@
 	{
 		"PublicDescription": "Sent SFENCE.VMA with ASID request to other HART event",
 		"ConfigCode": "0x800000000000000c",
-		"EventName": "FW_SFENCE_VMA_RECEIVED",
+		"EventName": "FW_SFENCE_VMA_ASID_SENT",
 		"BriefDescription": "Sent SFENCE.VMA with ASID request to other HART event"
 	},
 	{
...
@@ -36,7 +36,7 @@
 		"ArchStdEvent": "FW_SFENCE_VMA_RECEIVED"
 	},
 	{
-		"ArchStdEvent": "FW_SFENCE_VMA_RECEIVED"
+		"ArchStdEvent": "FW_SFENCE_VMA_ASID_SENT"
 	},
 	{
 		"ArchStdEvent": "FW_SFENCE_VMA_ASID_RECEIVED"
...
@@ -36,7 +36,7 @@
 		"ArchStdEvent": "FW_SFENCE_VMA_RECEIVED"
 	},
 	{
-		"ArchStdEvent": "FW_SFENCE_VMA_RECEIVED"
+		"ArchStdEvent": "FW_SFENCE_VMA_ASID_SENT"
 	},
 	{
 		"ArchStdEvent": "FW_SFENCE_VMA_ASID_RECEIVED"
...
@@ -36,7 +36,7 @@
 		"ArchStdEvent": "FW_SFENCE_VMA_RECEIVED"
 	},
 	{
-		"ArchStdEvent": "FW_SFENCE_VMA_RECEIVED"
+		"ArchStdEvent": "FW_SFENCE_VMA_ASID_SENT"
 	},
 	{
 		"ArchStdEvent": "FW_SFENCE_VMA_ASID_RECEIVED"
...