Commit 693350a7 authored by Will Deacon

arm64: insn: Don't fallback on nosync path for general insn patching

Patching kernel instructions at runtime requires other CPUs to undergo
a context synchronisation event via an explicit ISB or an IPI in order
to ensure that the new instructions are visible. This is required even
for "hotpatch" instructions such as NOP and BL, so avoid optimising in
this case and always go via stop_machine() when performing general
patching.

ftrace isn't quite as strict, so it can continue to call the nosync
code directly.
Signed-off-by: Will Deacon <will.deacon@arm.com>
parent 3b8c9f1c
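
For illustration (editorial note, not part of the commit): a minimal sketch of a caller of the general patching API after this change. The helper name example_patch_branch and its call site are hypothetical; aarch64_insn_patch_text() is the function touched by the diff below, and aarch64_insn_gen_branch_imm() is the existing encoder in the same file. With this patch, the general path always serialises every CPU via stop_machine() rather than falling back to the nosync shortcut.

#include <asm/insn.h>

/*
 * Hypothetical call site: replace a single instruction slot with a branch
 * to 'target'. Even a lone B or NOP, formerly treated as "hotpatch safe",
 * is now patched under stop_machine(), so all CPUs see a context
 * synchronisation event before this returns.
 */
static int example_patch_branch(void *site, void *target)
{
        void *addrs[] = { site };
        u32 insns[1];

        /* Encode an unconditional branch from 'site' to 'target'. */
        insns[0] = aarch64_insn_gen_branch_imm((unsigned long)site,
                                               (unsigned long)target,
                                               AARCH64_INSN_BRANCH_NOLINK);

        return aarch64_insn_patch_text(addrs, insns, 1);
}

A contrasting sketch of the relaxed nosync path that ftrace keeps using follows the diff below.
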
@@ -446,8 +446,6 @@ u32 aarch64_insn_gen_prefetch(enum aarch64_insn_register base,
 s32 aarch64_get_branch_offset(u32 insn);
 u32 aarch64_set_branch_offset(u32 insn, s32 offset);
-bool aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn);
 int aarch64_insn_patch_text_nosync(void *addr, u32 insn);
 int aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt);
...
@@ -149,20 +149,6 @@ int __kprobes aarch64_insn_write(void *addr, u32 insn)
         return __aarch64_insn_write(addr, cpu_to_le32(insn));
 }
-static bool __kprobes __aarch64_insn_hotpatch_safe(u32 insn)
-{
-        if (aarch64_get_insn_class(insn) != AARCH64_INSN_CLS_BR_SYS)
-                return false;
-        return aarch64_insn_is_b(insn) ||
-               aarch64_insn_is_bl(insn) ||
-               aarch64_insn_is_svc(insn) ||
-               aarch64_insn_is_hvc(insn) ||
-               aarch64_insn_is_smc(insn) ||
-               aarch64_insn_is_brk(insn) ||
-               aarch64_insn_is_nop(insn);
-}
 bool __kprobes aarch64_insn_uses_literal(u32 insn)
 {
         /* ldr/ldrsw (literal), prfm */
@@ -189,22 +175,6 @@ bool __kprobes aarch64_insn_is_branch(u32 insn)
                 aarch64_insn_is_bcond(insn);
 }
-/*
- * ARM Architecture Reference Manual for ARMv8 Profile-A, Issue A.a
- * Section B2.6.5 "Concurrent modification and execution of instructions":
- * Concurrent modification and execution of instructions can lead to the
- * resulting instruction performing any behavior that can be achieved by
- * executing any sequence of instructions that can be executed from the
- * same Exception level, except where the instruction before modification
- * and the instruction after modification is a B, BL, NOP, BKPT, SVC, HVC,
- * or SMC instruction.
- */
-bool __kprobes aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn)
-{
-        return __aarch64_insn_hotpatch_safe(old_insn) &&
-               __aarch64_insn_hotpatch_safe(new_insn);
-}
 int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn)
 {
         u32 *tp = addr;
@@ -239,11 +209,6 @@ static int __kprobes aarch64_insn_patch_text_cb(void *arg)
                 for (i = 0; ret == 0 && i < pp->insn_cnt; i++)
                         ret = aarch64_insn_patch_text_nosync(pp->text_addrs[i],
                                                              pp->new_insns[i]);
-                /*
-                 * aarch64_insn_patch_text_nosync() calls flush_icache_range(),
-                 * which ends with "dsb; isb" pair guaranteeing global
-                 * visibility.
-                 */
                 /* Notify other processors with an additional increment. */
                 atomic_inc(&pp->cpu_count);
         } else {
@@ -255,8 +220,7 @@ static int __kprobes aarch64_insn_patch_text_cb(void *arg)
         return ret;
 }
-static
-int __kprobes aarch64_insn_patch_text_sync(void *addrs[], u32 insns[], int cnt)
+int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt)
 {
         struct aarch64_insn_patch patch = {
                 .text_addrs = addrs,
@@ -272,24 +236,6 @@ int __kprobes aarch64_insn_patch_text_sync(void *addrs[], u32 insns[], int cnt)
                         cpu_online_mask);
 }
-int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt)
-{
-        int ret;
-        u32 insn;
-        /* Unsafe to patch multiple instructions without synchronizaiton */
-        if (cnt == 1) {
-                ret = aarch64_insn_read(addrs[0], &insn);
-                if (ret)
-                        return ret;
-                if (aarch64_insn_hotpatch_safe(insn, insns[0]))
-                        return aarch64_insn_patch_text_nosync(addrs[0], insns[0]);
-        }
-        return aarch64_insn_patch_text_sync(addrs, insns, cnt);
-}
 static int __kprobes aarch64_get_imm_shift_mask(enum aarch64_insn_imm_type type,
                                                 u32 *maskp, int *shiftp)
 {
...
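
For contrast (editorial note, hypothetical call site): the relaxed path that ftrace continues to use. aarch64_insn_patch_text_nosync() only writes the instruction and performs cache maintenance from the patching CPU, so it is appropriate only where the caller can tolerate the weaker cross-CPU guarantee, as ftrace can.

/*
 * Hypothetical ftrace-style call site: turn one patched call site back
 * into a NOP without forcing the other CPUs through stop_machine().
 */
static int example_patch_nop(void *site)
{
        return aarch64_insn_patch_text_nosync(site, aarch64_insn_gen_nop());
}
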