Commit 17bce3b2 authored by Uros Bizjak's avatar Uros Bizjak Committed by Ingo Molnar

x86/callthunks: Handle %rip-relative relocations in call thunk template

Contrary to alternatives, relocations are currently not supported in
call thunk templates.  Re-use the existing infrastructure from
alternative.c to allow %rip-relative relocations when copying call
thunk template from its storage location.

The patch allows unification of ASM_INCREMENT_CALL_DEPTH, which already
uses the PER_CPU_VAR macro, with INCREMENT_CALL_DEPTH, used in the call
thunk template, which is currently limited to using an absolute address.

Reuse existing relocation infrastructure from alternative.c,
as suggested by Peter Zijlstra.
Signed-off-by: Uros Bizjak <ubizjak@gmail.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lore.kernel.org/r/20231105213731.1878100-3-ubizjak@gmail.com
parent 43bda69e
...@@ -18,6 +18,8 @@ static inline void apply_paravirt(struct paravirt_patch_site *start, ...@@ -18,6 +18,8 @@ static inline void apply_paravirt(struct paravirt_patch_site *start,
#define __parainstructions_end NULL #define __parainstructions_end NULL
#endif #endif
void apply_relocation(u8 *buf, size_t len, u8 *dest, u8 *src, size_t src_len);
/* /*
* Currently, the max observed size in the kernel code is * Currently, the max observed size in the kernel code is
* JUMP_LABEL_NOP_SIZE/RELATIVEJUMP_SIZE, which are 5. * JUMP_LABEL_NOP_SIZE/RELATIVEJUMP_SIZE, which are 5.
......
...@@ -325,8 +325,7 @@ bool need_reloc(unsigned long offset, u8 *src, size_t src_len) ...@@ -325,8 +325,7 @@ bool need_reloc(unsigned long offset, u8 *src, size_t src_len)
return (target < src || target > src + src_len); return (target < src || target > src + src_len);
} }
static void __init_or_module noinline void apply_relocation(u8 *buf, size_t len, u8 *dest, u8 *src, size_t src_len)
apply_relocation(u8 *buf, size_t len, u8 *dest, u8 *src, size_t src_len)
{ {
int prev, target = 0; int prev, target = 0;
......
...@@ -24,6 +24,8 @@ ...@@ -24,6 +24,8 @@
static int __initdata_or_module debug_callthunks; static int __initdata_or_module debug_callthunks;
#define MAX_PATCH_LEN (255-1)
#define prdbg(fmt, args...) \ #define prdbg(fmt, args...) \
do { \ do { \
if (debug_callthunks) \ if (debug_callthunks) \
...@@ -184,10 +186,15 @@ static const u8 nops[] = { ...@@ -184,10 +186,15 @@ static const u8 nops[] = {
static void *patch_dest(void *dest, bool direct) static void *patch_dest(void *dest, bool direct)
{ {
unsigned int tsize = SKL_TMPL_SIZE; unsigned int tsize = SKL_TMPL_SIZE;
u8 insn_buff[MAX_PATCH_LEN];
u8 *pad = dest - tsize; u8 *pad = dest - tsize;
memcpy(insn_buff, skl_call_thunk_template, tsize);
apply_relocation(insn_buff, tsize, pad,
skl_call_thunk_template, tsize);
/* Already patched? */ /* Already patched? */
if (!bcmp(pad, skl_call_thunk_template, tsize)) if (!bcmp(pad, insn_buff, tsize))
return pad; return pad;
/* Ensure there are nops */ /* Ensure there are nops */
...@@ -197,9 +204,9 @@ static void *patch_dest(void *dest, bool direct) ...@@ -197,9 +204,9 @@ static void *patch_dest(void *dest, bool direct)
} }
if (direct) if (direct)
memcpy(pad, skl_call_thunk_template, tsize); memcpy(pad, insn_buff, tsize);
else else
text_poke_copy_locked(pad, skl_call_thunk_template, tsize, true); text_poke_copy_locked(pad, insn_buff, tsize, true);
return pad; return pad;
} }
...@@ -297,20 +304,27 @@ void *callthunks_translate_call_dest(void *dest) ...@@ -297,20 +304,27 @@ void *callthunks_translate_call_dest(void *dest)
static bool is_callthunk(void *addr) static bool is_callthunk(void *addr)
{ {
unsigned int tmpl_size = SKL_TMPL_SIZE; unsigned int tmpl_size = SKL_TMPL_SIZE;
void *tmpl = skl_call_thunk_template; u8 insn_buff[MAX_PATCH_LEN];
unsigned long dest; unsigned long dest;
u8 *pad;
dest = roundup((unsigned long)addr, CONFIG_FUNCTION_ALIGNMENT); dest = roundup((unsigned long)addr, CONFIG_FUNCTION_ALIGNMENT);
if (!thunks_initialized || skip_addr((void *)dest)) if (!thunks_initialized || skip_addr((void *)dest))
return false; return false;
return !bcmp((void *)(dest - tmpl_size), tmpl, tmpl_size); *pad = dest - tmpl_size;
memcpy(insn_buff, skl_call_thunk_template, tmpl_size);
apply_relocation(insn_buff, tmpl_size, pad,
skl_call_thunk_template, tmpl_size);
return !bcmp(pad, insn_buff, tmpl_size);
} }
int x86_call_depth_emit_accounting(u8 **pprog, void *func) int x86_call_depth_emit_accounting(u8 **pprog, void *func)
{ {
unsigned int tmpl_size = SKL_TMPL_SIZE; unsigned int tmpl_size = SKL_TMPL_SIZE;
void *tmpl = skl_call_thunk_template; u8 insn_buff[MAX_PATCH_LEN];
if (!thunks_initialized) if (!thunks_initialized)
return 0; return 0;
...@@ -319,7 +333,11 @@ int x86_call_depth_emit_accounting(u8 **pprog, void *func) ...@@ -319,7 +333,11 @@ int x86_call_depth_emit_accounting(u8 **pprog, void *func)
if (func && is_callthunk(func)) if (func && is_callthunk(func))
return 0; return 0;
memcpy(*pprog, tmpl, tmpl_size); memcpy(insn_buff, skl_call_thunk_template, tmpl_size);
apply_relocation(insn_buff, tmpl_size, *pprog,
skl_call_thunk_template, tmpl_size);
memcpy(*pprog, insn_buff, tmpl_size);
*pprog += tmpl_size; *pprog += tmpl_size;
return tmpl_size; return tmpl_size;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.