Commit ac0761eb authored by Sandipan Das, committed by Michael Ellerman

bpf: take advantage of stack_depth tracking in powerpc JIT

Take advantage of the stack_depth tracking, originally introduced for
x64, in the powerpc JIT as well. Round the allocated stack up to a
multiple of 16 bytes to make sure it stays quadword aligned for
functions called from the JITed BPF program.
Signed-off-by: Sandipan Das <sandipan@linux.vnet.ibm.com>
Reviewed-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent 632f0574
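For illustration, here is a small standalone sketch of the quadword round-up described in the commit message above. The helper name and the sample depths are made up for this example; the kernel side simply calls round_up(fp->aux->stack_depth, 16), as shown in the diff below.

#include <stdio.h>

/*
 * Hypothetical helper: rounds a BPF program's tracked stack depth up to
 * the next multiple of 16 bytes (a quadword), the same arithmetic as
 * round_up(depth, 16) in the JIT. Works because 16 is a power of two.
 */
static unsigned int quadword_align(unsigned int depth)
{
        return (depth + 15) & ~15u;
}

int main(void)
{
        unsigned int depths[] = { 0, 1, 24, 100, 512 }; /* sample depths */
        unsigned int i;

        for (i = 0; i < sizeof(depths) / sizeof(depths[0]); i++)
                printf("stack_depth=%3u -> stack_size=%3u\n",
                       depths[i], quadword_align(depths[i]));
        return 0;
}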
--- a/arch/powerpc/net/bpf_jit64.h
+++ b/arch/powerpc/net/bpf_jit64.h
@@ -23,7 +23,7 @@
  *              [   nv gpr save area    ] 8*8           |
  *              [    tail_call_cnt      ] 8             |
  *              [    local_tmp_var      ] 8             |
- * fp (r31) --> [   ebpf stack space    ] 512           |
+ * fp (r31) --> [   ebpf stack space    ] upto 512      |
  *              [     frame header      ] 32/112        |
  * sp (r1) ---> [    stack pointer      ] --------------
  */
@@ -32,8 +32,8 @@
 #define BPF_PPC_STACK_SAVE      (8*8)
 /* for bpf JIT code internal usage */
 #define BPF_PPC_STACK_LOCALS    16
-/* Ensure this is quadword aligned */
-#define BPF_PPC_STACKFRAME      (STACK_FRAME_MIN_SIZE + MAX_BPF_STACK + \
+/* stack frame excluding BPF stack, ensure this is quadword aligned */
+#define BPF_PPC_STACKFRAME      (STACK_FRAME_MIN_SIZE + \
                                  BPF_PPC_STACK_LOCALS + BPF_PPC_STACK_SAVE)
 
 #ifndef __ASSEMBLY__
@@ -103,6 +103,7 @@ struct codegen_context {
          */
         unsigned int seen;
         unsigned int idx;
+        unsigned int stack_size;
 };
 
 #endif /* !__ASSEMBLY__ */
--- a/arch/powerpc/net/bpf_jit_comp64.c
+++ b/arch/powerpc/net/bpf_jit_comp64.c
@@ -69,7 +69,7 @@ static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
 static int bpf_jit_stack_local(struct codegen_context *ctx)
 {
         if (bpf_has_stack_frame(ctx))
-                return STACK_FRAME_MIN_SIZE + MAX_BPF_STACK;
+                return STACK_FRAME_MIN_SIZE + ctx->stack_size;
         else
                 return -(BPF_PPC_STACK_SAVE + 16);
 }
@@ -82,8 +82,9 @@ static int bpf_jit_stack_tailcallcnt(struct codegen_context *ctx)
 static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg)
 {
         if (reg >= BPF_PPC_NVR_MIN && reg < 32)
-                return (bpf_has_stack_frame(ctx) ? BPF_PPC_STACKFRAME : 0)
-                                                        - (8 * (32 - reg));
+                return (bpf_has_stack_frame(ctx) ?
+                        (BPF_PPC_STACKFRAME + ctx->stack_size) : 0)
+                                - (8 * (32 - reg));
 
         pr_err("BPF JIT is asking about unknown registers");
         BUG();
@@ -134,7 +135,7 @@ static void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
                         PPC_BPF_STL(0, 1, PPC_LR_STKOFF);
                 }
 
-                PPC_BPF_STLU(1, 1, -BPF_PPC_STACKFRAME);
+                PPC_BPF_STLU(1, 1, -(BPF_PPC_STACKFRAME + ctx->stack_size));
         }
 
         /*
@@ -161,7 +162,7 @@ static void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
         /* Setup frame pointer to point to the bpf stack area */
         if (bpf_is_seen_register(ctx, BPF_REG_FP))
                 PPC_ADDI(b2p[BPF_REG_FP], 1,
-                                STACK_FRAME_MIN_SIZE + MAX_BPF_STACK);
+                                STACK_FRAME_MIN_SIZE + ctx->stack_size);
 }
 
 static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx)
@@ -183,7 +184,7 @@ static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx
 
         /* Tear down our stack frame */
         if (bpf_has_stack_frame(ctx)) {
-                PPC_ADDI(1, 1, BPF_PPC_STACKFRAME);
+                PPC_ADDI(1, 1, BPF_PPC_STACKFRAME + ctx->stack_size);
                 if (ctx->seen & SEEN_FUNC) {
                         PPC_BPF_LL(0, 1, PPC_LR_STKOFF);
                         PPC_MTLR(0);
@@ -1013,6 +1014,9 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
 
         memset(&cgctx, 0, sizeof(struct codegen_context));
 
+        /* Make sure that the stack is quadword aligned. */
+        cgctx.stack_size = round_up(fp->aux->stack_depth, 16);
+
         /* Scouting faux-generate pass 0 */
         if (bpf_jit_build_body(fp, 0, &cgctx, addrs)) {
                 /* We hit something illegal or unsupported. */
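As a rough worked example of what the change buys, the sketch below compares the old fixed-size frame with the new stack_depth-based one, reusing the constants visible in the layout comment and macros above. The 32-byte frame header (ELFv2) and the 64-byte example stack depth are assumptions for illustration only, not values taken from a real program.

#include <stdio.h>

/* Constants as they appear in the hunks above (ELFv2 frame header assumed). */
#define STACK_FRAME_MIN_SIZE    32      /* "frame header" 32/112: 32 on ELFv2 */
#define BPF_PPC_STACK_LOCALS    16      /* tail_call_cnt + local_tmp_var */
#define BPF_PPC_STACK_SAVE      (8 * 8) /* nv gpr save area */
#define MAX_BPF_STACK           512     /* old, fixed ebpf stack space */

int main(void)
{
        unsigned int stack_depth = 64;                  /* example program */
        unsigned int stack_size = (stack_depth + 15) & ~15u;

        unsigned int old_frame = STACK_FRAME_MIN_SIZE + MAX_BPF_STACK +
                                 BPF_PPC_STACK_LOCALS + BPF_PPC_STACK_SAVE;
        unsigned int new_frame = STACK_FRAME_MIN_SIZE + BPF_PPC_STACK_LOCALS +
                                 BPF_PPC_STACK_SAVE + stack_size;

        /* Prints 624 vs 176 bytes for this example. */
        printf("old frame: %u bytes, new frame: %u bytes\n",
               old_frame, new_frame);
        return 0;
}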