Commit 69828c47 authored by Linus Torvalds

Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 fixes from Catalin Marinas:

 - Allow CPUs affected by erratum 1418040 to come online late
   (previously we only fixed the other case - CPUs not affected by the
   erratum coming up late).

 - Fix branch offset in BPF JIT.

 - Defer the stolen time initialisation from CPU starting time to CPU
   online time, to avoid a (sleepable) memory allocation in an atomic
   context (a sketch of the resulting pattern follows this list).
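
A minimal sketch of the CPU-hotplug pattern the third fix moves to (not
code from this series; all names here are made up): callbacks registered
in the ONLINE range, e.g. via CPUHP_AP_ONLINE_DYN, run in process context
on the CPU coming up and are allowed to sleep, whereas CPUHP_AP_*_STARTING
callbacks run with interrupts disabled and must stay atomic.

    #include <linux/cpuhotplug.h>
    #include <linux/slab.h>

    static int example_cpu_online(unsigned int cpu)
    {
            /* GFP_KERNEL may sleep: legal here, not in a STARTING callback */
            void *buf = kzalloc(64, GFP_KERNEL);

            if (!buf)
                    return -ENOMEM;
            /* ... publish buf as this CPU's region ... */
            return 0;
    }

    static int example_cpu_down_prepare(unsigned int cpu)
    {
            /* ... unpublish and free this CPU's region ... */
            return 0;
    }

    static int __init example_init(void)
    {
            int ret;

            /* cpuhp_setup_state() returns a positive dynamic state id
             * for CPUHP_AP_ONLINE_DYN on success, hence the < 0 check */
            ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
                                    "example/pvtime:online",
                                    example_cpu_online,
                                    example_cpu_down_prepare);
            return ret < 0 ? ret : 0;
    }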

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: paravirt: Initialize steal time when cpu is online
  arm64: bpf: Fix branch offset in JIT
  arm64: Allow CPUs unaffected by ARM erratum 1418040 to come in late
parents 5a55d36f 75df529b
arch/arm64/kernel/cpu_errata.c
@@ -910,8 +910,12 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 		.desc = "ARM erratum 1418040",
 		.capability = ARM64_WORKAROUND_1418040,
 		ERRATA_MIDR_RANGE_LIST(erratum_1418040_list),
-		.type = (ARM64_CPUCAP_SCOPE_LOCAL_CPU |
-			 ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU),
+		/*
+		 * We need to allow affected CPUs to come in late, but
+		 * also need the non-affected CPUs to be able to come
+		 * in at any point in time. Wonderful.
+		 */
+		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
 	},
 #endif
 #ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT
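
For reference (quoting the cpufeature flags from memory; verify against
arch/arm64/include/asm/cpufeature.h), ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE
is an existing shorthand that covers both directions at once:
OPTIONAL_FOR_LATE_CPU lets unaffected CPUs come up after the capability
was established, and PERMITTED_FOR_LATE_CPU lets affected CPUs do the same.

    #define ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE        \
            (ARM64_CPUCAP_SCOPE_LOCAL_CPU          |   \
             ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU    |   \
             ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU)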
arch/arm64/kernel/paravirt.c
@@ -50,16 +50,19 @@ static u64 pv_steal_clock(int cpu)
 	struct pv_time_stolen_time_region *reg;
 
 	reg = per_cpu_ptr(&stolen_time_region, cpu);
-	if (!reg->kaddr) {
-		pr_warn_once("stolen time enabled but not configured for cpu %d\n",
-			     cpu);
+
+	/*
+	 * paravirt_steal_clock() may be called before the CPU
+	 * online notification callback runs. Until the callback
+	 * has run we just return zero.
+	 */
+	if (!reg->kaddr)
 		return 0;
-	}
 
 	return le64_to_cpu(READ_ONCE(reg->kaddr->stolen_time));
 }
 
-static int stolen_time_dying_cpu(unsigned int cpu)
+static int stolen_time_cpu_down_prepare(unsigned int cpu)
 {
 	struct pv_time_stolen_time_region *reg;
 
@@ -73,7 +76,7 @@ static int stolen_time_dying_cpu(unsigned int cpu)
 	return 0;
 }
 
-static int init_stolen_time_cpu(unsigned int cpu)
+static int stolen_time_cpu_online(unsigned int cpu)
 {
 	struct pv_time_stolen_time_region *reg;
 	struct arm_smccc_res res;
@@ -103,19 +106,20 @@ static int init_stolen_time_cpu(unsigned int cpu)
 	return 0;
 }
 
-static int pv_time_init_stolen_time(void)
+static int __init pv_time_init_stolen_time(void)
 {
 	int ret;
 
-	ret = cpuhp_setup_state(CPUHP_AP_ARM_KVMPV_STARTING,
-				"hypervisor/arm/pvtime:starting",
-				init_stolen_time_cpu, stolen_time_dying_cpu);
+	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
+				"hypervisor/arm/pvtime:online",
+				stolen_time_cpu_online,
+				stolen_time_cpu_down_prepare);
 	if (ret < 0)
 		return ret;
 
 	return 0;
 }
 
-static bool has_pv_steal_clock(void)
+static bool __init has_pv_steal_clock(void)
 {
 	struct arm_smccc_res res;
arch/arm64/net/bpf_jit_comp.c
@@ -143,14 +143,17 @@ static inline void emit_addr_mov_i64(const int reg, const u64 val,
 	}
 }
 
-static inline int bpf2a64_offset(int bpf_to, int bpf_from,
+static inline int bpf2a64_offset(int bpf_insn, int off,
 				 const struct jit_ctx *ctx)
 {
-	int to = ctx->offset[bpf_to];
-	/* -1 to account for the Branch instruction */
-	int from = ctx->offset[bpf_from] - 1;
-
-	return to - from;
+	/* BPF JMP offset is relative to the next instruction */
+	bpf_insn++;
+	/*
+	 * Whereas arm64 branch instructions encode the offset
+	 * from the branch itself, so we must subtract 1 from the
+	 * instruction offset.
+	 */
+	return ctx->offset[bpf_insn + off] - (ctx->offset[bpf_insn] - 1);
 }
 
 static void jit_fill_hole(void *area, unsigned int size)
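
To make the fixed arithmetic concrete, here is a small self-contained
rendering of the new helper with made-up offset[] values (the array
semantics match the comment added in build_body below: offset[i] is the
arm64 index at the start of BPF instruction i, offset[0] being the end
of the prologue):

    #include <assert.h>

    static int bpf2a64_offset(int bpf_insn, int off, const int *offset)
    {
            bpf_insn++;     /* BPF jumps are relative to the next insn */
            /* arm64 branches are relative to themselves, hence the -1 */
            return offset[bpf_insn + off] - (offset[bpf_insn] - 1);
    }

    int main(void)
    {
            /* prologue = 2 arm64 insns; the 4 BPF insns lower to
             * 2, 1, 3 and 1 arm64 insns respectively */
            const int offset[] = { 2, 4, 5, 8, 9 };

            /* BPF insn 1 is a one-insn branch sitting at arm64 index
             * offset[2] - 1 = 4; with off = 2 it targets BPF insn 4,
             * i.e. arm64 index offset[4] = 9, so the delta must be 5 */
            assert(bpf2a64_offset(1, 2, offset) == 5);
            return 0;
    }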
@@ -642,7 +645,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
 	/* JUMP off */
 	case BPF_JMP | BPF_JA:
-		jmp_offset = bpf2a64_offset(i + off, i, ctx);
+		jmp_offset = bpf2a64_offset(i, off, ctx);
 		check_imm26(jmp_offset);
 		emit(A64_B(jmp_offset), ctx);
 		break;
 
@@ -669,7 +672,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
 	case BPF_JMP32 | BPF_JSLE | BPF_X:
 		emit(A64_CMP(is64, dst, src), ctx);
 emit_cond_jmp:
-		jmp_offset = bpf2a64_offset(i + off, i, ctx);
+		jmp_offset = bpf2a64_offset(i, off, ctx);
 		check_imm19(jmp_offset);
 		switch (BPF_OP(code)) {
 		case BPF_JEQ:
@@ -908,10 +911,21 @@ static int build_body(struct jit_ctx *ctx, bool extra_pass)
 	const struct bpf_prog *prog = ctx->prog;
 	int i;
 
+	/*
+	 * - offset[0] - offset of the end of prologue,
+	 *   start of the 1st instruction.
+	 * - offset[1] - offset of the end of 1st instruction,
+	 *   start of the 2nd instruction
+	 * [....]
+	 * - offset[3] - offset of the end of 3rd instruction,
+	 *   start of 4th instruction
+	 */
 	for (i = 0; i < prog->len; i++) {
 		const struct bpf_insn *insn = &prog->insnsi[i];
 		int ret;
 
+		if (ctx->image == NULL)
+			ctx->offset[i] = ctx->idx;
 		ret = build_insn(insn, ctx, extra_pass);
 		if (ret > 0) {
 			i++;
@@ -919,11 +933,16 @@ static int build_body(struct jit_ctx *ctx, bool extra_pass)
 			ctx->offset[i] = ctx->idx;
 			continue;
 		}
-		if (ctx->image == NULL)
-			ctx->offset[i] = ctx->idx;
 		if (ret)
 			return ret;
 	}
+	/*
+	 * offset is allocated with prog->len + 1 so fill in
+	 * the last element with the offset after the last
+	 * instruction (end of program)
+	 */
+	if (ctx->image == NULL)
+		ctx->offset[i] = ctx->idx;
 
 	return 0;
 }
@@ -1002,7 +1021,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 	memset(&ctx, 0, sizeof(ctx));
 	ctx.prog = prog;
 
-	ctx.offset = kcalloc(prog->len, sizeof(int), GFP_KERNEL);
+	ctx.offset = kcalloc(prog->len + 1, sizeof(int), GFP_KERNEL);
 	if (ctx.offset == NULL) {
 		prog = orig_prog;
 		goto out_off;
@@ -1089,7 +1108,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 	prog->jited_len = prog_size;
 
 	if (!prog->is_func || extra_pass) {
-		bpf_prog_fill_jited_linfo(prog, ctx.offset);
+		bpf_prog_fill_jited_linfo(prog, ctx.offset + 1);
 out_off:
 		kfree(ctx.offset);
 		kfree(jit_data);
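
A note on the ctx.offset + 1 above (my reading, not spelled out in the
commit message): bpf_prog_fill_jited_linfo() expects, for each BPF
instruction, the jitted offset just past that instruction. With offset[]
now holding prog->len + 1 start offsets, shifting the base by one yields
exactly those end offsets:

    #include <stdio.h>

    int main(void)
    {
            /* toy start offsets from the earlier example */
            const int offset[] = { 2, 4, 5, 8, 9 };
            const int *insn_to_jit_off = offset + 1;  /* what the JIT passes */

            /* end of BPF insn i == start of insn i + 1 */
            for (int i = 0; i < 4; i++)
                    printf("insn %d ends at arm64 index %d\n",
                           i, insn_to_jit_off[i]);
            return 0;
    }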
include/linux/cpuhotplug.h
@@ -142,7 +142,6 @@ enum cpuhp_state {
 	/* Must be the last timer callback */
 	CPUHP_AP_DUMMY_TIMER_STARTING,
 	CPUHP_AP_ARM_XEN_STARTING,
-	CPUHP_AP_ARM_KVMPV_STARTING,
 	CPUHP_AP_ARM_CORESIGHT_STARTING,
 	CPUHP_AP_ARM_CORESIGHT_CTI_STARTING,
 	CPUHP_AP_ARM64_ISNDEP_STARTING,