Commit 4df7c5fd authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'riscv-for-linus-6.7-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux

Pull RISC-V fixes from Palmer Dabbelt:

 - A pair of fixes to the new module load-time relocation code

 - A fix for hwprobe overflowing on rv32

 - A fix to correctly decode C.SWSP and C.SDSP, which manifests in
   misaligned access handling

 - A fix for a boot-time shadow call stack initialization ordering issue

 - A fix for Andes' errata probing, which was calling
   riscv_noncoherent_supported() too late in the boot process and
   triggering an oops

* tag 'riscv-for-linus-6.7-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux:
  riscv: errata: andes: Probe for IOCP only once in boot stage
  riscv: Fix SMP when shadow call stacks are enabled
  dt-bindings: perf: riscv,pmu: drop unneeded quotes
  riscv: fix misaligned access handling of C.SWSP and C.SDSP
  RISC-V: hwprobe: Always use u64 for extension bits
  Support rv32 ULEB128 test
  riscv: Correct type casting in module loading
  riscv: Safely remove entries from relocation list
parents a6adef89 ed5b7cfd
...@@ -90,7 +90,7 @@ properties: ...@@ -90,7 +90,7 @@ properties:
bitmap of all MHPMCOUNTERx that can monitor the range of events bitmap of all MHPMCOUNTERx that can monitor the range of events
dependencies: dependencies:
"riscv,event-to-mhpmevent": [ "riscv,event-to-mhpmcounters" ] riscv,event-to-mhpmevent: [ "riscv,event-to-mhpmcounters" ]
required: required:
- compatible - compatible
......
...@@ -38,29 +38,35 @@ static long ax45mp_iocp_sw_workaround(void) ...@@ -38,29 +38,35 @@ static long ax45mp_iocp_sw_workaround(void)
return ret.error ? 0 : ret.value; return ret.error ? 0 : ret.value;
} }
static bool errata_probe_iocp(unsigned int stage, unsigned long arch_id, unsigned long impid) static void errata_probe_iocp(unsigned int stage, unsigned long arch_id, unsigned long impid)
{ {
static bool done;
if (!IS_ENABLED(CONFIG_ERRATA_ANDES_CMO)) if (!IS_ENABLED(CONFIG_ERRATA_ANDES_CMO))
return false; return;
if (done)
return;
done = true;
if (arch_id != ANDESTECH_AX45MP_MARCHID || impid != ANDESTECH_AX45MP_MIMPID) if (arch_id != ANDESTECH_AX45MP_MARCHID || impid != ANDESTECH_AX45MP_MIMPID)
return false; return;
if (!ax45mp_iocp_sw_workaround()) if (!ax45mp_iocp_sw_workaround())
return false; return;
/* Set this just to make core cbo code happy */ /* Set this just to make core cbo code happy */
riscv_cbom_block_size = 1; riscv_cbom_block_size = 1;
riscv_noncoherent_supported(); riscv_noncoherent_supported();
return true;
} }
void __init_or_module andes_errata_patch_func(struct alt_entry *begin, struct alt_entry *end, void __init_or_module andes_errata_patch_func(struct alt_entry *begin, struct alt_entry *end,
unsigned long archid, unsigned long impid, unsigned long archid, unsigned long impid,
unsigned int stage) unsigned int stage)
{ {
errata_probe_iocp(stage, archid, impid); if (stage == RISCV_ALTERNATIVES_BOOT)
errata_probe_iocp(stage, archid, impid);
/* we have nothing to patch here ATM so just return back */ /* we have nothing to patch here ATM so just return back */
} }
...@@ -154,7 +154,6 @@ secondary_start_sbi: ...@@ -154,7 +154,6 @@ secondary_start_sbi:
XIP_FIXUP_OFFSET a3 XIP_FIXUP_OFFSET a3
add a3, a3, a1 add a3, a3, a1
REG_L sp, (a3) REG_L sp, (a3)
scs_load_current
.Lsecondary_start_common: .Lsecondary_start_common:
...@@ -165,6 +164,7 @@ secondary_start_sbi: ...@@ -165,6 +164,7 @@ secondary_start_sbi:
call relocate_enable_mmu call relocate_enable_mmu
#endif #endif
call .Lsetup_trap_vector call .Lsetup_trap_vector
scs_load_current
tail smp_callin tail smp_callin
#endif /* CONFIG_SMP */ #endif /* CONFIG_SMP */
......
...@@ -40,15 +40,6 @@ struct relocation_handlers { ...@@ -40,15 +40,6 @@ struct relocation_handlers {
long buffer); long buffer);
}; };
unsigned int initialize_relocation_hashtable(unsigned int num_relocations);
void process_accumulated_relocations(struct module *me);
int add_relocation_to_accumulate(struct module *me, int type, void *location,
unsigned int hashtable_bits, Elf_Addr v);
struct hlist_head *relocation_hashtable;
struct list_head used_buckets_list;
/* /*
* The auipc+jalr instruction pair can reach any PC-relative offset * The auipc+jalr instruction pair can reach any PC-relative offset
* in the range [-2^31 - 2^11, 2^31 - 2^11) * in the range [-2^31 - 2^11, 2^31 - 2^11)
...@@ -64,7 +55,7 @@ static bool riscv_insn_valid_32bit_offset(ptrdiff_t val) ...@@ -64,7 +55,7 @@ static bool riscv_insn_valid_32bit_offset(ptrdiff_t val)
static int riscv_insn_rmw(void *location, u32 keep, u32 set) static int riscv_insn_rmw(void *location, u32 keep, u32 set)
{ {
u16 *parcel = location; __le16 *parcel = location;
u32 insn = (u32)le16_to_cpu(parcel[0]) | (u32)le16_to_cpu(parcel[1]) << 16; u32 insn = (u32)le16_to_cpu(parcel[0]) | (u32)le16_to_cpu(parcel[1]) << 16;
insn &= keep; insn &= keep;
...@@ -77,7 +68,7 @@ static int riscv_insn_rmw(void *location, u32 keep, u32 set) ...@@ -77,7 +68,7 @@ static int riscv_insn_rmw(void *location, u32 keep, u32 set)
static int riscv_insn_rvc_rmw(void *location, u16 keep, u16 set) static int riscv_insn_rvc_rmw(void *location, u16 keep, u16 set)
{ {
u16 *parcel = location; __le16 *parcel = location;
u16 insn = le16_to_cpu(*parcel); u16 insn = le16_to_cpu(*parcel);
insn &= keep; insn &= keep;
...@@ -604,7 +595,10 @@ static const struct relocation_handlers reloc_handlers[] = { ...@@ -604,7 +595,10 @@ static const struct relocation_handlers reloc_handlers[] = {
/* 192-255 nonstandard ABI extensions */ /* 192-255 nonstandard ABI extensions */
}; };
void process_accumulated_relocations(struct module *me) static void
process_accumulated_relocations(struct module *me,
struct hlist_head **relocation_hashtable,
struct list_head *used_buckets_list)
{ {
/* /*
* Only ADD/SUB/SET/ULEB128 should end up here. * Only ADD/SUB/SET/ULEB128 should end up here.
...@@ -624,18 +618,25 @@ void process_accumulated_relocations(struct module *me) ...@@ -624,18 +618,25 @@ void process_accumulated_relocations(struct module *me)
* - Each relocation entry for a location address * - Each relocation entry for a location address
*/ */
struct used_bucket *bucket_iter; struct used_bucket *bucket_iter;
struct used_bucket *bucket_iter_tmp;
struct relocation_head *rel_head_iter; struct relocation_head *rel_head_iter;
struct hlist_node *rel_head_iter_tmp;
struct relocation_entry *rel_entry_iter; struct relocation_entry *rel_entry_iter;
struct relocation_entry *rel_entry_iter_tmp;
int curr_type; int curr_type;
void *location; void *location;
long buffer; long buffer;
list_for_each_entry(bucket_iter, &used_buckets_list, head) { list_for_each_entry_safe(bucket_iter, bucket_iter_tmp,
hlist_for_each_entry(rel_head_iter, bucket_iter->bucket, node) { used_buckets_list, head) {
hlist_for_each_entry_safe(rel_head_iter, rel_head_iter_tmp,
bucket_iter->bucket, node) {
buffer = 0; buffer = 0;
location = rel_head_iter->location; location = rel_head_iter->location;
list_for_each_entry(rel_entry_iter, list_for_each_entry_safe(rel_entry_iter,
rel_head_iter->rel_entry, head) { rel_entry_iter_tmp,
rel_head_iter->rel_entry,
head) {
curr_type = rel_entry_iter->type; curr_type = rel_entry_iter->type;
reloc_handlers[curr_type].reloc_handler( reloc_handlers[curr_type].reloc_handler(
me, &buffer, rel_entry_iter->value); me, &buffer, rel_entry_iter->value);
...@@ -648,11 +649,14 @@ void process_accumulated_relocations(struct module *me) ...@@ -648,11 +649,14 @@ void process_accumulated_relocations(struct module *me)
kfree(bucket_iter); kfree(bucket_iter);
} }
kfree(relocation_hashtable); kfree(*relocation_hashtable);
} }
int add_relocation_to_accumulate(struct module *me, int type, void *location, static int add_relocation_to_accumulate(struct module *me, int type,
unsigned int hashtable_bits, Elf_Addr v) void *location,
unsigned int hashtable_bits, Elf_Addr v,
struct hlist_head *relocation_hashtable,
struct list_head *used_buckets_list)
{ {
struct relocation_entry *entry; struct relocation_entry *entry;
struct relocation_head *rel_head; struct relocation_head *rel_head;
...@@ -661,6 +665,10 @@ int add_relocation_to_accumulate(struct module *me, int type, void *location, ...@@ -661,6 +665,10 @@ int add_relocation_to_accumulate(struct module *me, int type, void *location,
unsigned long hash; unsigned long hash;
entry = kmalloc(sizeof(*entry), GFP_KERNEL); entry = kmalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
return -ENOMEM;
INIT_LIST_HEAD(&entry->head); INIT_LIST_HEAD(&entry->head);
entry->type = type; entry->type = type;
entry->value = v; entry->value = v;
...@@ -669,7 +677,10 @@ int add_relocation_to_accumulate(struct module *me, int type, void *location, ...@@ -669,7 +677,10 @@ int add_relocation_to_accumulate(struct module *me, int type, void *location,
current_head = &relocation_hashtable[hash]; current_head = &relocation_hashtable[hash];
/* Find matching location (if any) */ /*
* Search for the relocation_head for the relocations that happen at the
* provided location
*/
bool found = false; bool found = false;
struct relocation_head *rel_head_iter; struct relocation_head *rel_head_iter;
...@@ -681,19 +692,45 @@ int add_relocation_to_accumulate(struct module *me, int type, void *location, ...@@ -681,19 +692,45 @@ int add_relocation_to_accumulate(struct module *me, int type, void *location,
} }
} }
/*
* If there has not yet been any relocations at the provided location,
* create a relocation_head for that location and populate it with this
* relocation_entry.
*/
if (!found) { if (!found) {
rel_head = kmalloc(sizeof(*rel_head), GFP_KERNEL); rel_head = kmalloc(sizeof(*rel_head), GFP_KERNEL);
if (!rel_head) {
kfree(entry);
return -ENOMEM;
}
rel_head->rel_entry = rel_head->rel_entry =
kmalloc(sizeof(struct list_head), GFP_KERNEL); kmalloc(sizeof(struct list_head), GFP_KERNEL);
if (!rel_head->rel_entry) {
kfree(entry);
kfree(rel_head);
return -ENOMEM;
}
INIT_LIST_HEAD(rel_head->rel_entry); INIT_LIST_HEAD(rel_head->rel_entry);
rel_head->location = location; rel_head->location = location;
INIT_HLIST_NODE(&rel_head->node); INIT_HLIST_NODE(&rel_head->node);
if (!current_head->first) { if (!current_head->first) {
bucket = bucket =
kmalloc(sizeof(struct used_bucket), GFP_KERNEL); kmalloc(sizeof(struct used_bucket), GFP_KERNEL);
if (!bucket) {
kfree(entry);
kfree(rel_head);
kfree(rel_head->rel_entry);
return -ENOMEM;
}
INIT_LIST_HEAD(&bucket->head); INIT_LIST_HEAD(&bucket->head);
bucket->bucket = current_head; bucket->bucket = current_head;
list_add(&bucket->head, &used_buckets_list); list_add(&bucket->head, used_buckets_list);
} }
hlist_add_head(&rel_head->node, current_head); hlist_add_head(&rel_head->node, current_head);
} }
...@@ -704,7 +741,9 @@ int add_relocation_to_accumulate(struct module *me, int type, void *location, ...@@ -704,7 +741,9 @@ int add_relocation_to_accumulate(struct module *me, int type, void *location,
return 0; return 0;
} }
unsigned int initialize_relocation_hashtable(unsigned int num_relocations) static unsigned int
initialize_relocation_hashtable(unsigned int num_relocations,
struct hlist_head **relocation_hashtable)
{ {
/* Can safely assume that bits is not greater than sizeof(long) */ /* Can safely assume that bits is not greater than sizeof(long) */
unsigned long hashtable_size = roundup_pow_of_two(num_relocations); unsigned long hashtable_size = roundup_pow_of_two(num_relocations);
...@@ -720,12 +759,13 @@ unsigned int initialize_relocation_hashtable(unsigned int num_relocations) ...@@ -720,12 +759,13 @@ unsigned int initialize_relocation_hashtable(unsigned int num_relocations)
hashtable_size <<= should_double_size; hashtable_size <<= should_double_size;
relocation_hashtable = kmalloc_array(hashtable_size, *relocation_hashtable = kmalloc_array(hashtable_size,
sizeof(*relocation_hashtable), sizeof(*relocation_hashtable),
GFP_KERNEL); GFP_KERNEL);
__hash_init(relocation_hashtable, hashtable_size); if (!*relocation_hashtable)
return -ENOMEM;
INIT_LIST_HEAD(&used_buckets_list); __hash_init(*relocation_hashtable, hashtable_size);
return hashtable_bits; return hashtable_bits;
} }
...@@ -742,7 +782,17 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab, ...@@ -742,7 +782,17 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
Elf_Addr v; Elf_Addr v;
int res; int res;
unsigned int num_relocations = sechdrs[relsec].sh_size / sizeof(*rel); unsigned int num_relocations = sechdrs[relsec].sh_size / sizeof(*rel);
unsigned int hashtable_bits = initialize_relocation_hashtable(num_relocations); struct hlist_head *relocation_hashtable;
struct list_head used_buckets_list;
unsigned int hashtable_bits;
hashtable_bits = initialize_relocation_hashtable(num_relocations,
&relocation_hashtable);
if (hashtable_bits < 0)
return hashtable_bits;
INIT_LIST_HEAD(&used_buckets_list);
pr_debug("Applying relocate section %u to %u\n", relsec, pr_debug("Applying relocate section %u to %u\n", relsec,
sechdrs[relsec].sh_info); sechdrs[relsec].sh_info);
...@@ -823,14 +873,18 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab, ...@@ -823,14 +873,18 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
} }
if (reloc_handlers[type].accumulate_handler) if (reloc_handlers[type].accumulate_handler)
res = add_relocation_to_accumulate(me, type, location, hashtable_bits, v); res = add_relocation_to_accumulate(me, type, location,
hashtable_bits, v,
relocation_hashtable,
&used_buckets_list);
else else
res = handler(me, location, v); res = handler(me, location, v);
if (res) if (res)
return res; return res;
} }
process_accumulated_relocations(me); process_accumulated_relocations(me, &relocation_hashtable,
&used_buckets_list);
return 0; return 0;
} }
......
...@@ -169,7 +169,7 @@ static void hwprobe_isa_ext0(struct riscv_hwprobe *pair, ...@@ -169,7 +169,7 @@ static void hwprobe_isa_ext0(struct riscv_hwprobe *pair,
pair->value &= ~missing; pair->value &= ~missing;
} }
static bool hwprobe_ext0_has(const struct cpumask *cpus, unsigned long ext) static bool hwprobe_ext0_has(const struct cpumask *cpus, u64 ext)
{ {
struct riscv_hwprobe pair; struct riscv_hwprobe pair;
......
...@@ -6,13 +6,13 @@ ...@@ -6,13 +6,13 @@
.text .text
.global test_uleb_basic .global test_uleb_basic
test_uleb_basic: test_uleb_basic:
ld a0, second lw a0, second
addi a0, a0, -127 addi a0, a0, -127
ret ret
.global test_uleb_large .global test_uleb_large
test_uleb_large: test_uleb_large:
ld a0, fourth lw a0, fourth
addi a0, a0, -0x07e8 addi a0, a0, -0x07e8
ret ret
...@@ -22,10 +22,10 @@ first: ...@@ -22,10 +22,10 @@ first:
second: second:
.reloc second, R_RISCV_SET_ULEB128, second .reloc second, R_RISCV_SET_ULEB128, second
.reloc second, R_RISCV_SUB_ULEB128, first .reloc second, R_RISCV_SUB_ULEB128, first
.dword 0 .word 0
third: third:
.space 1000 .space 1000
fourth: fourth:
.reloc fourth, R_RISCV_SET_ULEB128, fourth .reloc fourth, R_RISCV_SET_ULEB128, fourth
.reloc fourth, R_RISCV_SUB_ULEB128, third .reloc fourth, R_RISCV_SUB_ULEB128, third
.dword 0 .word 0
...@@ -550,16 +550,14 @@ int handle_misaligned_store(struct pt_regs *regs) ...@@ -550,16 +550,14 @@ int handle_misaligned_store(struct pt_regs *regs)
} else if ((insn & INSN_MASK_C_SD) == INSN_MATCH_C_SD) { } else if ((insn & INSN_MASK_C_SD) == INSN_MATCH_C_SD) {
len = 8; len = 8;
val.data_ulong = GET_RS2S(insn, regs); val.data_ulong = GET_RS2S(insn, regs);
} else if ((insn & INSN_MASK_C_SDSP) == INSN_MATCH_C_SDSP && } else if ((insn & INSN_MASK_C_SDSP) == INSN_MATCH_C_SDSP) {
((insn >> SH_RD) & 0x1f)) {
len = 8; len = 8;
val.data_ulong = GET_RS2C(insn, regs); val.data_ulong = GET_RS2C(insn, regs);
#endif #endif
} else if ((insn & INSN_MASK_C_SW) == INSN_MATCH_C_SW) { } else if ((insn & INSN_MASK_C_SW) == INSN_MATCH_C_SW) {
len = 4; len = 4;
val.data_ulong = GET_RS2S(insn, regs); val.data_ulong = GET_RS2S(insn, regs);
} else if ((insn & INSN_MASK_C_SWSP) == INSN_MATCH_C_SWSP && } else if ((insn & INSN_MASK_C_SWSP) == INSN_MATCH_C_SWSP) {
((insn >> SH_RD) & 0x1f)) {
len = 4; len = 4;
val.data_ulong = GET_RS2C(insn, regs); val.data_ulong = GET_RS2C(insn, regs);
} else if ((insn & INSN_MASK_C_FSD) == INSN_MATCH_C_FSD) { } else if ((insn & INSN_MASK_C_FSD) == INSN_MATCH_C_FSD) {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment