Commit b881cdce authored by Will Deacon, committed by Marc Zyngier

KVM: arm64: Allocate hyp vectors statically

The EL2 vectors installed when a guest is running point at one of the
following configurations for a given CPU:

  - Straight at __kvm_hyp_vector
  - A trampoline containing an SMC sequence to mitigate Spectre-v2 and
    then a direct branch to __kvm_hyp_vector
  - A dynamically-allocated trampoline which has an indirect branch to
    __kvm_hyp_vector
  - A dynamically-allocated trampoline containing an SMC sequence to
    mitigate Spectre-v2 and then an indirect branch to __kvm_hyp_vector

The indirect branches mean that VA randomization at EL2 isn't trivially
bypassable using Spectre-v3a (where the vector base is readable by the
guest).

Rather than populate these vectors dynamically, configure everything
statically and use an enumerated type to identify the vector "slot"
corresponding to one of the configurations above. This not only
simplifies the code, but also makes it much easier to implement at EL2
later on (see the illustrative sketch after the commit metadata).
Signed-off-by: Will Deacon <will@kernel.org>
[maz: fixed double call to kvm_init_vector_slots() on nVHE]
Signed-off-by: Marc Zyngier <maz@kernel.org>
Cc: Marc Zyngier <maz@kernel.org>
Cc: Quentin Perret <qperret@google.com>
Link: https://lore.kernel.org/r/20201113113847.21619-8-will@kernel.org
parent da592e68
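
For illustration only — this sketch is not part of the patch, and the
helper name vector_slot_va is made up — each configuration above occupies
one SZ_2K slot in __bp_harden_hyp_vecs, so resolving a slot's base address
is a single offset computation, mirroring kvm_init_vector_slot() in the
diff below:

    #include <stdio.h>

    #define SZ_2K 0x800 /* matches the kernel's SZ_2K */

    enum arm64_hyp_spectre_vector {
        HYP_VECTOR_DIRECT,           /* straight to __kvm_hyp_vector */
        HYP_VECTOR_SPECTRE_DIRECT,   /* SMC sequence, then direct branch */
        HYP_VECTOR_INDIRECT,         /* indirect branch via exec mapping */
        HYP_VECTOR_SPECTRE_INDIRECT, /* SMC sequence, then indirect branch */
    };

    /* Mirrors kvm_init_vector_slot(): each slot sits SZ_2K apart. */
    static unsigned long vector_slot_va(unsigned long base,
                                        enum arm64_hyp_spectre_vector slot)
    {
        return base + slot * SZ_2K;
    }

    int main(void)
    {
        /* 0x40000000 is an arbitrary example base, not a real hyp VA */
        printf("%#lx\n",
               vector_slot_va(0x40000000UL, HYP_VECTOR_SPECTRE_DIRECT));
        return 0; /* prints 0x40000800 */
    }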
@@ -34,8 +34,6 @@
  */
 #define KVM_VECTOR_PREAMBLE	(2 * AARCH64_INSN_SIZE)
 
-#define __SMCCC_WORKAROUND_1_SMC_SZ 36
-
 #define KVM_HOST_SMCCC_ID(id)						\
 	ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,				\
 			   ARM_SMCCC_SMC_64,				\
@@ -175,7 +173,6 @@ extern unsigned long kvm_arm_hyp_percpu_base[NR_CPUS];
 DECLARE_KVM_NVHE_SYM(__per_cpu_start);
 DECLARE_KVM_NVHE_SYM(__per_cpu_end);
 
-extern atomic_t arm64_el2_vector_last_slot;
 DECLARE_KVM_HYP_SYM(__bp_harden_hyp_vecs);
 #define __bp_harden_hyp_vecs	CHOOSE_HYP_SYM(__bp_harden_hyp_vecs)
@@ -198,8 +195,6 @@ extern void __vgic_v3_init_lrs(void);
 extern u32 __kvm_get_mdcr_el2(void);
 
-extern char __smccc_workaround_1_smc[__SMCCC_WORKAROUND_1_SMC_SZ];
-
 /*
  * Obtain the PC-relative address of a kernel symbol
  * s: symbol
...
@@ -28,11 +28,41 @@ enum mitigation_state {
 
 struct task_struct;
 
+/*
+ * Note: the order of this enum corresponds to __bp_harden_hyp_vecs and
+ * we rely on having the direct vectors first.
+ */
+enum arm64_hyp_spectre_vector {
+	/*
+	 * Take exceptions directly to __kvm_hyp_vector. This must be
+	 * 0 so that it is used by default when mitigations are not needed.
+	 */
+	HYP_VECTOR_DIRECT,
+
+	/*
+	 * Bounce via a slot in the hypervisor text mapping of
+	 * __bp_harden_hyp_vecs, which contains an SMC call.
+	 */
+	HYP_VECTOR_SPECTRE_DIRECT,
+
+	/*
+	 * Bounce via a slot in a special mapping of __bp_harden_hyp_vecs
+	 * next to the idmap page.
+	 */
+	HYP_VECTOR_INDIRECT,
+
+	/*
+	 * Bounce via a slot in a special mapping of __bp_harden_hyp_vecs
+	 * next to the idmap page, which contains an SMC call.
+	 */
+	HYP_VECTOR_SPECTRE_INDIRECT,
+};
+
 typedef void (*bp_hardening_cb_t)(void);
 
 struct bp_hardening_data {
-	int			hyp_vectors_slot;
+	enum arm64_hyp_spectre_vector	slot;
 	bp_hardening_cb_t	fn;
 };
 
 DECLARE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
@@ -53,6 +83,8 @@ enum mitigation_state arm64_get_spectre_v2_state(void);
 bool has_spectre_v2(const struct arm64_cpu_capabilities *cap, int scope);
 void spectre_v2_enable_mitigation(const struct arm64_cpu_capabilities *__unused);
 
+void cpu_el2_vector_harden_enable(const struct arm64_cpu_capabilities *__unused);
+
 enum mitigation_state arm64_get_spectre_v4_state(void);
 bool has_spectre_v4(const struct arm64_cpu_capabilities *cap, int scope);
 void spectre_v4_enable_mitigation(const struct arm64_cpu_capabilities *__unused);
...
@@ -459,9 +459,11 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 	},
 #ifdef CONFIG_RANDOMIZE_BASE
 	{
+		/* Must come after the Spectre-v2 entry */
 		.desc = "EL2 vector hardening",
 		.capability = ARM64_HARDEN_EL2_VECTORS,
 		ERRATA_MIDR_RANGE_LIST(ca57_a72),
+		.cpu_enable = cpu_el2_vector_harden_enable,
 	},
 #endif
 	{
...
@@ -26,6 +26,7 @@
 #include <asm/spectre.h>
 #include <asm/traps.h>
+#include <asm/virt.h>
 
 /*
  * We try to ensure that the mitigation state can never change as the result of
@@ -169,72 +170,26 @@ bool has_spectre_v2(const struct arm64_cpu_capabilities *entry, int scope)
 	return true;
 }
 
-DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
-
 enum mitigation_state arm64_get_spectre_v2_state(void)
 {
 	return spectre_v2_state;
 }
 
-#ifdef CONFIG_KVM
-#include <asm/cacheflush.h>
-#include <asm/kvm_asm.h>
-
-atomic_t arm64_el2_vector_last_slot = ATOMIC_INIT(-1);
-
-static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
-				const char *hyp_vecs_end)
-{
-	void *dst = lm_alias(__bp_harden_hyp_vecs + slot * SZ_2K);
-	int i;
-
-	for (i = 0; i < SZ_2K; i += 0x80)
-		memcpy(dst + i, hyp_vecs_start, hyp_vecs_end - hyp_vecs_start);
-
-	__flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
-}
+DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
 
 static void install_bp_hardening_cb(bp_hardening_cb_t fn)
 {
-	static DEFINE_RAW_SPINLOCK(bp_lock);
-	int cpu, slot = -1;
-	const char *hyp_vecs_start = __smccc_workaround_1_smc;
-	const char *hyp_vecs_end = __smccc_workaround_1_smc +
-				   __SMCCC_WORKAROUND_1_SMC_SZ;
+	__this_cpu_write(bp_hardening_data.fn, fn);
 
 	/*
 	 * Vinz Clortho takes the hyp_vecs start/end "keys" at
 	 * the door when we're a guest. Skip the hyp-vectors work.
 	 */
-	if (!is_hyp_mode_available()) {
-		__this_cpu_write(bp_hardening_data.fn, fn);
+	if (!is_hyp_mode_available())
 		return;
-	}
 
-	raw_spin_lock(&bp_lock);
-	for_each_possible_cpu(cpu) {
-		if (per_cpu(bp_hardening_data.fn, cpu) == fn) {
-			slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
-			break;
-		}
-	}
-
-	if (slot == -1) {
-		slot = atomic_inc_return(&arm64_el2_vector_last_slot);
-		BUG_ON(slot >= BP_HARDEN_EL2_SLOTS);
-		__copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
-	}
-
-	__this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
-	__this_cpu_write(bp_hardening_data.fn, fn);
-	raw_spin_unlock(&bp_lock);
+	__this_cpu_write(bp_hardening_data.slot, HYP_VECTOR_SPECTRE_DIRECT);
 }
 
-#else
-static void install_bp_hardening_cb(bp_hardening_cb_t fn)
-{
-	__this_cpu_write(bp_hardening_data.fn, fn);
-}
-#endif	/* CONFIG_KVM */
-
 static void call_smc_arch_workaround_1(void)
 {
@@ -315,6 +270,14 @@ void spectre_v2_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
 	update_mitigation_state(&spectre_v2_state, state);
 }
 
+void cpu_el2_vector_harden_enable(const struct arm64_cpu_capabilities *__unused)
+{
+	struct bp_hardening_data *data = this_cpu_ptr(&bp_hardening_data);
+
+	if (this_cpu_has_cap(ARM64_HARDEN_EL2_VECTORS))
+		data->slot += HYP_VECTOR_INDIRECT;
+}
+
 /*
  * Spectre v4.
  *
...
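
A minimal userspace check (not kernel code) of the enum-offset trick used
by cpu_el2_vector_harden_enable() above: because the direct vectors come
first in arm64_hyp_spectre_vector, adding HYP_VECTOR_INDIRECT converts
either direct slot into its indirect counterpart.

    #include <assert.h>

    enum arm64_hyp_spectre_vector {
        HYP_VECTOR_DIRECT,          /* 0 */
        HYP_VECTOR_SPECTRE_DIRECT,  /* 1 */
        HYP_VECTOR_INDIRECT,        /* 2 */
        HYP_VECTOR_SPECTRE_INDIRECT,/* 3 */
    };

    int main(void)
    {
        /* 0 + 2 == 2: direct becomes indirect */
        assert(HYP_VECTOR_DIRECT + HYP_VECTOR_INDIRECT ==
               HYP_VECTOR_INDIRECT);
        /* 1 + 2 == 3: Spectre-v2 direct becomes Spectre-v2 indirect */
        assert(HYP_VECTOR_SPECTRE_DIRECT + HYP_VECTOR_INDIRECT ==
               HYP_VECTOR_SPECTRE_INDIRECT);
        return 0;
    }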
@@ -51,14 +51,6 @@ DECLARE_KVM_HYP_PER_CPU(unsigned long, kvm_hyp_vector);
 static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
 unsigned long kvm_arm_hyp_percpu_base[NR_CPUS];
 
-/* Hypervisor VA of the indirect vector trampoline page */
-static void *__kvm_bp_vect_base;
-
-/*
- * Slot in the hyp vector page for use by the indirect vector trampoline
- * when mitigation against Spectre-v2 is not required.
- */
-static int __kvm_harden_el2_vector_slot;
-
 /* The VMID used in the VTTBR */
 static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
 static u32 kvm_next_vmid;
@@ -1303,33 +1295,38 @@ static unsigned long nvhe_percpu_order(void)
 	return size ? get_order(size) : 0;
 }
 
-static int kvm_map_vectors(void)
+/* A lookup table holding the hypervisor VA for each vector slot */
+static void *hyp_spectre_vector_selector[BP_HARDEN_EL2_SLOTS];
+
+static void kvm_init_vector_slot(void *base, enum arm64_hyp_spectre_vector slot)
 {
-	int slot;
+	hyp_spectre_vector_selector[slot] = base + (slot * SZ_2K);
+}
+
+static int kvm_init_vector_slots(void)
+{
+	int err;
+	void *base;
+
+	base = kern_hyp_va(kvm_ksym_ref(__kvm_hyp_vector));
+	kvm_init_vector_slot(base, HYP_VECTOR_DIRECT);
+
+	base = kern_hyp_va(kvm_ksym_ref(__bp_harden_hyp_vecs));
+	kvm_init_vector_slot(base, HYP_VECTOR_SPECTRE_DIRECT);
 
-	/*
-	 * SV2  = ARM64_SPECTRE_V2
-	 * HEL2 = ARM64_HARDEN_EL2_VECTORS
-	 *
-	 * !SV2 + !HEL2 -> use direct vectors
-	 *  SV2 + !HEL2 -> use hardened vectors in place
-	 * !SV2 +  HEL2 -> allocate one vector slot and use exec mapping
-	 *  SV2 +  HEL2 -> use hardened vectors and use exec mapping
-	 */
 	if (!cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS))
 		return 0;
 
-	/*
-	 * Always allocate a spare vector slot, as we don't know yet which CPUs
-	 * have a BP hardening slot that we can reuse.
-	 */
-	slot = atomic_inc_return(&arm64_el2_vector_last_slot);
-	BUG_ON(slot >= BP_HARDEN_EL2_SLOTS);
-	__kvm_harden_el2_vector_slot = slot;
-	return create_hyp_exec_mappings(__pa_symbol(__bp_harden_hyp_vecs),
-					__BP_HARDEN_HYP_VECS_SZ,
-					&__kvm_bp_vect_base);
+	if (!has_vhe()) {
+		err = create_hyp_exec_mappings(__pa_symbol(__bp_harden_hyp_vecs),
+					       __BP_HARDEN_HYP_VECS_SZ, &base);
+		if (err)
+			return err;
+	}
+
+	kvm_init_vector_slot(base, HYP_VECTOR_INDIRECT);
+	kvm_init_vector_slot(base, HYP_VECTOR_SPECTRE_INDIRECT);
+	return 0;
 }
 
 static void cpu_init_hyp_mode(void)
@@ -1406,24 +1403,9 @@ static void cpu_hyp_reset(void)
 static void cpu_set_hyp_vector(void)
 {
 	struct bp_hardening_data *data = this_cpu_ptr(&bp_hardening_data);
-	void *vect = kern_hyp_va(kvm_ksym_ref(__kvm_hyp_vector));
-	int slot = -1;
-
-	if (cpus_have_const_cap(ARM64_SPECTRE_V2) && data->fn) {
-		vect = kern_hyp_va(kvm_ksym_ref(__bp_harden_hyp_vecs));
-		slot = data->hyp_vectors_slot;
-	}
-
-	if (this_cpu_has_cap(ARM64_HARDEN_EL2_VECTORS) && !has_vhe()) {
-		vect = __kvm_bp_vect_base;
-		if (slot == -1)
-			slot = __kvm_harden_el2_vector_slot;
-	}
-
-	if (slot != -1)
-		vect += slot * SZ_2K;
+	void *vector = hyp_spectre_vector_selector[data->slot];
 
-	*this_cpu_ptr_hyp_sym(kvm_hyp_vector) = (unsigned long)vect;
+	*this_cpu_ptr_hyp_sym(kvm_hyp_vector) = (unsigned long)vector;
 }
 
 static void cpu_hyp_reinit(void)
@@ -1661,12 +1643,6 @@ static int init_hyp_mode(void)
 		goto out_err;
 	}
 
-	err = kvm_map_vectors();
-	if (err) {
-		kvm_err("Cannot map vectors\n");
-		goto out_err;
-	}
-
 	/*
 	 * Map the Hyp stack pages
 	 */
@@ -1810,6 +1786,12 @@ int kvm_arch_init(void *opaque)
 		goto out_err;
 	}
 
+	err = kvm_init_vector_slots();
+	if (err) {
+		kvm_err("Cannot initialise vector slots\n");
+		goto out_err;
+	}
+
 	err = init_subsystems();
 	if (err)
 		goto out_hyp;
...
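
To make the new control flow concrete, here is a minimal userspace model
(fake_vecs and init_vector_slot are illustrative stand-ins, not kernel
symbols): kvm_init_vector_slots() fills a small table once at init, and
cpu_set_hyp_vector() reduces to a single lookup.

    #include <stdio.h>

    #define BP_HARDEN_EL2_SLOTS 4
    #define SZ_2K 0x800

    static void *hyp_spectre_vector_selector[BP_HARDEN_EL2_SLOTS];

    /* Same shape as kvm_init_vector_slot() in the patch. */
    static void init_vector_slot(char *base, int slot)
    {
        hyp_spectre_vector_selector[slot] = base + slot * SZ_2K;
    }

    int main(void)
    {
        /* stand-in for the __bp_harden_hyp_vecs mapping */
        static char fake_vecs[BP_HARDEN_EL2_SLOTS * SZ_2K];
        int slot;

        for (slot = 0; slot < BP_HARDEN_EL2_SLOTS; slot++)
            init_vector_slot(fake_vecs, slot);

        /* cpu_set_hyp_vector() now reduces to this single lookup: */
        slot = 1; /* e.g. HYP_VECTOR_SPECTRE_DIRECT */
        printf("%p\n", hyp_spectre_vector_selector[slot]);
        return 0;
    }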
@@ -10,4 +10,4 @@ subdir-ccflags-y := -I$(incdir) \
 		    -DDISABLE_BRANCH_PROFILING	\
 		    $(DISABLE_STACKLEAK_PLUGIN)
 
-obj-$(CONFIG_KVM) += vhe/ nvhe/ pgtable.o smccc_wa.o
+obj-$(CONFIG_KVM) += vhe/ nvhe/ pgtable.o
@@ -188,52 +188,62 @@ SYM_CODE_START(__kvm_hyp_vector)
 	valid_vect	el1_error		// Error 32-bit EL1
 SYM_CODE_END(__kvm_hyp_vector)
 
-.macro hyp_ventry
-	.align 7
+.macro spectrev2_smccc_wa1_smc
+	sub	sp, sp, #(8 * 4)
+	stp	x2, x3, [sp, #(8 * 0)]
+	stp	x0, x1, [sp, #(8 * 2)]
+	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_1
+	smc	#0
+	ldp	x2, x3, [sp, #(8 * 0)]
+	add	sp, sp, #(8 * 2)
+.endm
+
+.macro hyp_ventry	indirect, spectrev2
+	.align 7
 1:	esb
-	.rept 26
-	nop
-	.endr
-/*
- * The default sequence is to directly branch to the KVM vectors,
- * using the computed offset. This applies for VHE as well as
- * !ARM64_HARDEN_EL2_VECTORS. The first vector must always run the preamble.
- *
- * For ARM64_HARDEN_EL2_VECTORS configurations, this gets replaced
- * with:
- *
- * stp	x0, x1, [sp, #-16]!
- * movz	x0, #(addr & 0xffff)
- * movk	x0, #((addr >> 16) & 0xffff), lsl #16
- * movk	x0, #((addr >> 32) & 0xffff), lsl #32
- * br	x0
- *
- * Where:
- * addr = kern_hyp_va(__kvm_hyp_vector) + vector-offset + KVM_VECTOR_PREAMBLE.
- * See kvm_patch_vector_branch for details.
- */
-alternative_cb	kvm_patch_vector_branch
+	.if \spectrev2 != 0
+	spectrev2_smccc_wa1_smc
+	.else
 	stp	x0, x1, [sp, #-16]!
-	b	__kvm_hyp_vector + (1b - 0b + KVM_VECTOR_PREAMBLE)
+	.endif
+
+	.if \indirect != 0
+	alternative_cb	kvm_patch_vector_branch
+	/*
+	 * For ARM64_HARDEN_EL2_VECTORS configurations, these NOPs get replaced
+	 * with:
+	 *
+	 * movz	x0, #(addr & 0xffff)
+	 * movk	x0, #((addr >> 16) & 0xffff), lsl #16
+	 * movk	x0, #((addr >> 32) & 0xffff), lsl #32
+	 * br	x0
+	 *
+	 * Where:
+	 * addr = kern_hyp_va(__kvm_hyp_vector) + vector-offset + KVM_VECTOR_PREAMBLE.
+	 * See kvm_patch_vector_branch for details.
+	 */
 	nop
 	nop
 	nop
-alternative_cb_end
+	nop
+	alternative_cb_end
+	.endif
+
+	b	__kvm_hyp_vector + (1b - 0b + KVM_VECTOR_PREAMBLE)
 .endm
 
-.macro generate_vectors
+.macro generate_vectors	indirect, spectrev2
 0:
 	.rept 16
-	hyp_ventry
+	hyp_ventry	\indirect, \spectrev2
 	.endr
 	.org 0b + SZ_2K				// Safety measure
 .endm
 
 	.align	11
 SYM_CODE_START(__bp_harden_hyp_vecs)
-	.rept BP_HARDEN_EL2_SLOTS
-	generate_vectors
-	.endr
+	generate_vectors indirect = 0, spectrev2 = 0 // HYP_VECTOR_DIRECT
+	generate_vectors indirect = 0, spectrev2 = 1 // HYP_VECTOR_SPECTRE_DIRECT
+	generate_vectors indirect = 1, spectrev2 = 0 // HYP_VECTOR_INDIRECT
+	generate_vectors indirect = 1, spectrev2 = 1 // HYP_VECTOR_SPECTRE_INDIRECT
1:	.org __bp_harden_hyp_vecs + __BP_HARDEN_HYP_VECS_SZ
 	.org 1b
 SYM_CODE_END(__bp_harden_hyp_vecs)
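
The layout arithmetic implied by the assembly above, as a small sketch
(not kernel code; it assumes BP_HARDEN_EL2_SLOTS is 4, as the four
generate_vectors invocations suggest): each hyp_ventry is padded to 128
bytes by .align 7, a slot therefore spans 16 * 128 = SZ_2K bytes, and the
four static slots fill __BP_HARDEN_HYP_VECS_SZ.

    #include <stdio.h>

    int main(void)
    {
        unsigned int ventry_sz = 1u << 7;       /* .align 7 -> 128 bytes */
        unsigned int slot_sz = 16 * ventry_sz;  /* .rept 16 -> SZ_2K */
        unsigned int total = 4 * slot_sz;       /* four static vector slots */

        printf("slot=%u bytes, total=%u bytes\n", slot_sz, total);
        return 0; /* prints: slot=2048 bytes, total=8192 bytes */
    }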
The following file (the source of smccc_wa.o, dropped from the Makefile
above) is deleted outright, since its contents are no longer copied into
the vectors at runtime:

-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2015-2018 - ARM Ltd
- * Author: Marc Zyngier <marc.zyngier@arm.com>
- */
-
-#include <linux/arm-smccc.h>
-#include <linux/linkage.h>
-
-#include <asm/kvm_asm.h>
-#include <asm/kvm_mmu.h>
-
-	/*
-	 * This is not executed directly and is instead copied into the
-	 * vectors by install_bp_hardening_cb().
-	 */
-	.data
-	.pushsection	.rodata
-	.global		__smccc_workaround_1_smc
-SYM_DATA_START(__smccc_workaround_1_smc)
-	esb
-	sub	sp, sp, #(8 * 4)
-	stp	x2, x3, [sp, #(8 * 0)]
-	stp	x0, x1, [sp, #(8 * 2)]
-	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_1
-	smc	#0
-	ldp	x2, x3, [sp, #(8 * 0)]
-	ldp	x0, x1, [sp, #(8 * 2)]
-	add	sp, sp, #(8 * 4)
-1:	.org __smccc_workaround_1_smc + __SMCCC_WORKAROUND_1_SMC_SZ
-	.org 1b
-SYM_DATA_END(__smccc_workaround_1_smc)
@@ -137,7 +137,7 @@ void kvm_patch_vector_branch(struct alt_instr *alt,
 	u64 addr;
 	u32 insn;
 
-	BUG_ON(nr_inst != 5);
+	BUG_ON(nr_inst != 4);
 
 	if (!cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS) ||
 	    WARN_ON_ONCE(has_vhe())) {
@@ -160,15 +160,6 @@ void kvm_patch_vector_branch(struct alt_instr *alt,
 	 */
 	addr += KVM_VECTOR_PREAMBLE;
 
-	/* stp x0, x1, [sp, #-16]! */
-	insn = aarch64_insn_gen_load_store_pair(AARCH64_INSN_REG_0,
-						AARCH64_INSN_REG_1,
-						AARCH64_INSN_REG_SP,
-						-16,
-						AARCH64_INSN_VARIANT_64BIT,
-						AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX);
-	*updptr++ = cpu_to_le32(insn);
-
 	/* movz x0, #(addr & 0xffff) */
 	insn = aarch64_insn_gen_movewide(AARCH64_INSN_REG_0,
 					 (u16)addr,
...
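
For reference, a small sketch (not kernel code; the example address is
arbitrary) of how kvm_patch_vector_branch splits a hyp VA into the
movz/movk immediates shown in the hyp_ventry comment. With the stp moved
into the vectors themselves, only four instructions are patched (movz,
two movk, br) instead of five, matching the new BUG_ON(nr_inst != 4).

    #include <stdio.h>

    int main(void)
    {
        unsigned long long addr = 0x0000ffcadef01234ULL; /* example VA */

        printf("movz x0, #0x%llx\n", addr & 0xffff);
        printf("movk x0, #0x%llx, lsl #16\n", (addr >> 16) & 0xffff);
        printf("movk x0, #0x%llx, lsl #32\n", (addr >> 32) & 0xffff);
        printf("br   x0\n");
        return 0;
    }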