Commit 25c7cb05 authored by Linus Torvalds

Merge tag 'x86_alternatives_for_v6.10_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 asm alternatives updates from Borislav Petkov:

 - Switch the in-place instruction patching, which led to at least one
   weird bug with 32-bit guests seeing stale instruction bytes, to
   patching done on a temporary buffer like the rest of the alternatives
   code does (a minimal model of the pattern follows this list)

 - Add a long overdue check to the X86_FEATURE flag modifying functions
   to warn when those flags get changed in an incompatible way after
   alternatives have been patched, because such late changes no longer
   take effect in the already-patched code

 - Other cleanups
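
A minimal userspace model of the buffer-based pattern from the first
item above (illustrative only, not the kernel code; in the kernel,
text_poke_early() is the single write to the live text):

#include <stdio.h>
#include <string.h>

#define MAX_PATCH_LEN 64

/* stand-in for text_poke_early(): the only write to the live text */
static void publish(unsigned char *text, const unsigned char *buf, size_t len)
{
	memcpy(text, buf, len);
}

int main(void)
{
	unsigned char text[4] = { 0x90, 0x90, 0x90, 0x90 };	/* 4 x 1-byte NOP */
	unsigned char buf[MAX_PATCH_LEN];

	memcpy(buf, text, sizeof(text));  /* 1) copy insns into a scratch buffer */
	buf[0] = 0x0f; buf[1] = 0x1f;	  /* 2) rewrite them there, e.g. fold    */
	buf[2] = 0x40; buf[3] = 0x00;	  /*    them into one 4-byte NOPL        */
	publish(text, buf, sizeof(text)); /* 3) publish the result in one write  */

	for (size_t i = 0; i < sizeof(text); i++)
		printf("%02x ", text[i]);
	printf("\n");
	return 0;
}

The intermediate rewriting steps only ever touch the scratch buffer, so
an observer of the live bytes sees either the old or the final
instructions, never a half-patched state.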

* tag 'x86_alternatives_for_v6.10_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/alternatives: Remove alternative_input_2()
  x86/alternatives: Sort local vars in apply_alternatives()
  x86/alternatives: Optimize optimize_nops()
  x86/alternatives: Get rid of __optimize_nops()
  x86/alternatives: Use a temporary buffer when optimizing NOPs
  x86/alternatives: Catch late X86_FEATURE modifiers
parents b4864f65 8dc8b02d
......@@ -286,20 +286,6 @@ static inline int alternatives_text_reserved(void *start, void *end)
asm_inline volatile (ALTERNATIVE(oldinstr, newinstr, ft_flags) \
: : "i" (0), ## input)
-/*
- * This is similar to alternative_input. But it has two features and
- * respective instructions.
- *
- * If CPU has feature2, newinstr2 is used.
- * Otherwise, if CPU has feature1, newinstr1 is used.
- * Otherwise, oldinstr is used.
- */
-#define alternative_input_2(oldinstr, newinstr1, ft_flags1, newinstr2, \
-ft_flags2, input...) \
-asm_inline volatile(ALTERNATIVE_2(oldinstr, newinstr1, ft_flags1, \
-newinstr2, ft_flags2) \
-: : "i" (0), ## input)
/* Like alternative_input, but with a single output argument */
#define alternative_io(oldinstr, newinstr, ft_flags, output, input...) \
asm_inline volatile (ALTERNATIVE(oldinstr, newinstr, ft_flags) \
......
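
The three-way selection that the removed comment documented is still
what ALTERNATIVE_2() expresses; modeled in plain C, with runtime
dispatch standing in for boot-time patching (names illustrative):

typedef void (*insn_t)(void);

/* feature2 wins over feature1, which wins over the original instruction */
static insn_t alt2_pick(insn_t oldinstr,
			insn_t newinstr1, int has_feature1,
			insn_t newinstr2, int has_feature2)
{
	if (has_feature2)
		return newinstr2;
	if (has_feature1)
		return newinstr1;
	return oldinstr;
}
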
......@@ -149,8 +149,12 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
extern void setup_clear_cpu_cap(unsigned int bit);
extern void clear_cpu_cap(struct cpuinfo_x86 *c, unsigned int bit);
-#define setup_force_cpu_cap(bit) do { \
-set_cpu_cap(&boot_cpu_data, bit); \
+#define setup_force_cpu_cap(bit) do { \
+\
+if (!boot_cpu_has(bit)) \
+WARN_ON(alternatives_patched); \
+\
+set_cpu_cap(&boot_cpu_data, bit); \
set_bit(bit, (unsigned long *)cpu_caps_set); \
} while (0)
......
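
A condensed model of the new rule in setup_force_cpu_cap() above and of
the symmetric check added to do_clear_cpu_cap() at the end of this diff
(a sketch, not kernel code):

#include <stdio.h>

static int alternatives_patched;	/* set once patching is done */
static unsigned long cpu_caps;

static void force_cpu_cap(int bit)
{
	/* forcing a bit the code was already patched against is too late */
	if (!(cpu_caps & (1UL << bit)) && alternatives_patched)
		fprintf(stderr, "WARN: feature %d forced after patching\n", bit);

	cpu_caps |= 1UL << bit;
}
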
......@@ -15,7 +15,7 @@
extern void text_poke_early(void *addr, const void *opcode, size_t len);
-extern void apply_relocation(u8 *buf, size_t len, u8 *dest, u8 *src, size_t src_len);
+extern void apply_relocation(u8 *buf, const u8 * const instr, size_t instrlen, u8 *repl, size_t repl_len);
/*
* Clear and restore the kernel write-protection flag on the local CPU.
......
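
The new prototype uses the @buf/@instr/@repl nomenclature documented in
alternative.c below. For comparison, the apply_alternatives() call site
changes shape like this (both lines taken from this diff):

/* old: buffer, length, destination, source, source length */
apply_relocation(insn_buff, a->instrlen, instr, replacement, a->replacementlen);

/* new: buffer, original insns and their length, replacement and its length */
apply_relocation(insn_buff, instr, a->instrlen, replacement, a->replacementlen);
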
......@@ -124,6 +124,20 @@ const unsigned char * const x86_nops[ASM_NOP_MAX+1] =
#endif
};
+/*
+ * Nomenclature for variable names to simplify and clarify this code and ease
+ * any potential staring at it:
+ *
+ * @instr: source address of the original instructions in the kernel text as
+ * generated by the compiler.
+ *
+ * @buf: temporary buffer on which the patching operates. This buffer is
+ * eventually text-poked into the kernel image.
+ *
+ * @replacement/@repl: pointer to the opcodes which are replacing @instr, located
+ * in the .altinstr_replacement section.
+ */
/*
* Fill the buffer with a single effective instruction of size @len.
*
......@@ -133,28 +147,28 @@ const unsigned char * const x86_nops[ASM_NOP_MAX+1] =
* each single-byte NOPs). If @len to fill out is > ASM_NOP_MAX, pad with INT3 and
* *jump* over instead of executing long and daft NOPs.
*/
-static void add_nop(u8 *instr, unsigned int len)
+static void add_nop(u8 *buf, unsigned int len)
{
-u8 *target = instr + len;
+u8 *target = buf + len;
if (!len)
return;
if (len <= ASM_NOP_MAX) {
-memcpy(instr, x86_nops[len], len);
+memcpy(buf, x86_nops[len], len);
return;
}
if (len < 128) {
-__text_gen_insn(instr, JMP8_INSN_OPCODE, instr, target, JMP8_INSN_SIZE);
-instr += JMP8_INSN_SIZE;
+__text_gen_insn(buf, JMP8_INSN_OPCODE, buf, target, JMP8_INSN_SIZE);
+buf += JMP8_INSN_SIZE;
} else {
-__text_gen_insn(instr, JMP32_INSN_OPCODE, instr, target, JMP32_INSN_SIZE);
-instr += JMP32_INSN_SIZE;
+__text_gen_insn(buf, JMP32_INSN_OPCODE, buf, target, JMP32_INSN_SIZE);
+buf += JMP32_INSN_SIZE;
}
-for (;instr < target; instr++)
-*instr = INT3_INSN_OPCODE;
+for (;buf < target; buf++)
+*buf = INT3_INSN_OPCODE;
}
extern s32 __retpoline_sites[], __retpoline_sites_end[];
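
add_nop() above picks between three fills: one optimal NOP while @len
fits the lookup table, a short JMP over INT3 padding below 128 bytes,
and a near JMP otherwise. The same decision as a self-contained
userspace sketch (NOP table truncated to 4 bytes; assumes a
little-endian host):

#include <string.h>

/* x86 "long NOP" encodings, 1 to 4 bytes (the kernel's table goes to 8) */
static const unsigned char nop1[] = { 0x90 };
static const unsigned char nop2[] = { 0x66, 0x90 };
static const unsigned char nop3[] = { 0x0f, 0x1f, 0x00 };
static const unsigned char nop4[] = { 0x0f, 0x1f, 0x40, 0x00 };
static const unsigned char *nops[] = { NULL, nop1, nop2, nop3, nop4 };

static void fill_nop(unsigned char *buf, unsigned int len)
{
	unsigned char *target = buf + len;

	if (!len)
		return;

	if (len <= 4) {			/* one effective NOP */
		memcpy(buf, nops[len], len);
		return;
	}

	if (len < 128) {		/* JMP rel8 over the padding */
		*buf++ = 0xeb;		/* jmp short */
		*buf++ = (unsigned char)(len - 2);
	} else {			/* JMP rel32 over the padding */
		unsigned int rel = len - 5;

		*buf++ = 0xe9;		/* jmp near */
		memcpy(buf, &rel, 4);	/* rel32, little-endian */
		buf += 4;
	}

	while (buf < target)
		*buf++ = 0xcc;		/* INT3 */
}
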
......@@ -187,12 +201,12 @@ static bool insn_is_nop(struct insn *insn)
* Find the offset of the first non-NOP instruction starting at @offset
* but no further than @len.
*/
-static int skip_nops(u8 *instr, int offset, int len)
+static int skip_nops(u8 *buf, int offset, int len)
{
struct insn insn;
for (; offset < len; offset += insn.length) {
-if (insn_decode_kernel(&insn, &instr[offset]))
+if (insn_decode_kernel(&insn, &buf[offset]))
break;
if (!insn_is_nop(&insn))
......@@ -202,67 +216,33 @@ static int skip_nops(u8 *instr, int offset, int len)
return offset;
}
-/*
- * Optimize a sequence of NOPs, possibly preceded by an unconditional jump
- * to the end of the NOP sequence into a single NOP.
- */
-static bool
-__optimize_nops(u8 *instr, size_t len, struct insn *insn, int *next, int *prev, int *target)
-{
-int i = *next - insn->length;
-switch (insn->opcode.bytes[0]) {
-case JMP8_INSN_OPCODE:
-case JMP32_INSN_OPCODE:
-*prev = i;
-*target = *next + insn->immediate.value;
-return false;
-}
-if (insn_is_nop(insn)) {
-int nop = i;
-*next = skip_nops(instr, *next, len);
-if (*target && *next == *target)
-nop = *prev;
-add_nop(instr + nop, *next - nop);
-DUMP_BYTES(ALT, instr, len, "%px: [%d:%d) optimized NOPs: ", instr, nop, *next);
-return true;
-}
-*target = 0;
-return false;
-}
/*
* "noinline" to cause control flow change and thus invalidate I$ and
* cause refetch after modification.
*/
-static void __init_or_module noinline optimize_nops(u8 *instr, size_t len)
+static void noinline optimize_nops(const u8 * const instr, u8 *buf, size_t len)
{
-int prev, target = 0;
for (int next, i = 0; i < len; i = next) {
struct insn insn;
-if (insn_decode_kernel(&insn, &instr[i]))
+if (insn_decode_kernel(&insn, &buf[i]))
return;
next = i + insn.length;
-__optimize_nops(instr, len, &insn, &next, &prev, &target);
-}
-}
+if (insn_is_nop(&insn)) {
+int nop = i;
-static void __init_or_module noinline optimize_nops_inplace(u8 *instr, size_t len)
-{
-unsigned long flags;
+/* Has the NOP already been optimized? */
+if (i + insn.length == len)
+return;
-local_irq_save(flags);
-optimize_nops(instr, len);
-sync_core();
-local_irq_restore(flags);
+next = skip_nops(buf, next, len);
+add_nop(buf + nop, next - nop);
+DUMP_BYTES(ALT, buf, len, "%px: [%d:%d) optimized NOPs: ", instr, nop, next);
+}
+}
}
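
Net effect of the rewritten optimize_nops() above: find a NOP, extend
across the run of NOPs that follows (skip_nops()) and re-emit the whole
run as one effective instruction (add_nop()). A toy equivalent that
only understands single-byte 0x90 NOPs, reusing fill_nop() from the
earlier sketch (the kernel decodes real instructions with
insn_decode_kernel(); this does not):

static void optimize_nops_toy(unsigned char *buf, size_t len)
{
	for (size_t i = 0; i < len; ) {
		if (buf[i] != 0x90) {	/* not a NOP: step one byte      */
			i++;		/* (real code steps insn.length) */
			continue;
		}

		size_t next = i + 1;	/* find the end of the NOP run */
		while (next < len && buf[next] == 0x90)
			next++;

		fill_nop(buf + i, next - i);	/* re-emit as one insn */
		i = next;
	}
}
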
/*
......@@ -335,11 +315,9 @@ bool need_reloc(unsigned long offset, u8 *src, size_t src_len)
return (target < src || target > src + src_len);
}
-void apply_relocation(u8 *buf, size_t len, u8 *dest, u8 *src, size_t src_len)
+static void __apply_relocation(u8 *buf, const u8 * const instr, size_t instrlen, u8 *repl, size_t repl_len)
{
-int prev, target = 0;
-for (int next, i = 0; i < len; i = next) {
+for (int next, i = 0; i < instrlen; i = next) {
struct insn insn;
if (WARN_ON_ONCE(insn_decode_kernel(&insn, &buf[i])))
......@@ -347,9 +325,6 @@ void apply_relocation(u8 *buf, size_t len, u8 *dest, u8 *src, size_t src_len)
next = i + insn.length;
-if (__optimize_nops(buf, len, &insn, &next, &prev, &target))
-continue;
switch (insn.opcode.bytes[0]) {
case 0x0f:
if (insn.opcode.bytes[1] < 0x80 ||
......@@ -361,10 +336,10 @@ void apply_relocation(u8 *buf, size_t len, u8 *dest, u8 *src, size_t src_len)
case JMP8_INSN_OPCODE:
case JMP32_INSN_OPCODE:
case CALL_INSN_OPCODE:
-if (need_reloc(next + insn.immediate.value, src, src_len)) {
+if (need_reloc(next + insn.immediate.value, repl, repl_len)) {
apply_reloc(insn.immediate.nbytes,
buf + i + insn_offset_immediate(&insn),
-src - dest);
+repl - instr);
}
/*
......@@ -372,7 +347,7 @@ void apply_relocation(u8 *buf, size_t len, u8 *dest, u8 *src, size_t src_len)
*/
if (insn.opcode.bytes[0] == JMP32_INSN_OPCODE) {
s32 imm = insn.immediate.value;
-imm += src - dest;
+imm += repl - instr;
imm += JMP32_INSN_SIZE - JMP8_INSN_SIZE;
if ((imm >> 31) == (imm >> 7)) {
buf[i+0] = JMP8_INSN_OPCODE;
......@@ -385,15 +360,21 @@ void apply_relocation(u8 *buf, size_t len, u8 *dest, u8 *src, size_t src_len)
}
if (insn_rip_relative(&insn)) {
-if (need_reloc(next + insn.displacement.value, src, src_len)) {
+if (need_reloc(next + insn.displacement.value, repl, repl_len)) {
apply_reloc(insn.displacement.nbytes,
buf + i + insn_offset_displacement(&insn),
-src - dest);
+repl - instr);
}
}
}
}
+void apply_relocation(u8 *buf, const u8 * const instr, size_t instrlen, u8 *repl, size_t repl_len)
+{
+__apply_relocation(buf, instr, instrlen, repl, repl_len);
+optimize_nops(instr, buf, repl_len);
+}
/* Low-level backend functions usable from alternative code replacements. */
DEFINE_ASM_FUNC(nop_func, "", .entry.text);
EXPORT_SYMBOL_GPL(nop_func);
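
The repl - instr terms in __apply_relocation() above are the heart of
the relocation: the bytes in @buf were copied from @repl but will run
at @instr, so a rel32 that should keep pointing at the same absolute
target must grow by the distance the code moved. The arithmetic in
isolation (illustrative only; the kernel writes the value back through
apply_reloc()):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* a CALL rel32 is 5 bytes; rel = target - (insn_addr + 5) */
	uint64_t repl = 0x5000, instr = 0x1000, target = 0x9000;

	int64_t old_rel = (int64_t)(target - (repl + 5));	/* correct at @repl  */
	int64_t new_rel = old_rel + (int64_t)(repl - instr);	/* correct at @instr */

	/* the re-targeted instruction at @instr still reaches @target */
	printf("old_rel=%#llx new_rel=%#llx resolves to %#llx\n",
	       (unsigned long long)old_rel, (unsigned long long)new_rel,
	       (unsigned long long)(instr + 5 + new_rel));
	return 0;
}
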
......@@ -464,9 +445,9 @@ static int alt_replace_call(u8 *instr, u8 *insn_buff, struct alt_instr *a)
void __init_or_module noinline apply_alternatives(struct alt_instr *start,
struct alt_instr *end)
{
-struct alt_instr *a;
-u8 *instr, *replacement;
u8 insn_buff[MAX_PATCH_LEN];
+u8 *instr, *replacement;
+struct alt_instr *a;
DPRINTK(ALT, "alt table %px, -> %px", start, end);
......@@ -504,7 +485,9 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start,
* patch if feature is *NOT* present.
*/
if (!boot_cpu_has(a->cpuid) == !(a->flags & ALT_FLAG_NOT)) {
-optimize_nops_inplace(instr, a->instrlen);
+memcpy(insn_buff, instr, a->instrlen);
+optimize_nops(instr, insn_buff, a->instrlen);
+text_poke_early(instr, insn_buff, a->instrlen);
continue;
}
......@@ -526,7 +509,7 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start,
for (; insn_buff_sz < a->instrlen; insn_buff_sz++)
insn_buff[insn_buff_sz] = 0x90;
-apply_relocation(insn_buff, a->instrlen, instr, replacement, a->replacementlen);
+apply_relocation(insn_buff, instr, a->instrlen, replacement, a->replacementlen);
DUMP_BYTES(ALT, instr, a->instrlen, "%px: old_insn: ", instr);
DUMP_BYTES(ALT, replacement, a->replacementlen, "%px: rpl_insn: ", replacement);
......@@ -761,7 +744,7 @@ void __init_or_module noinline apply_retpolines(s32 *start, s32 *end)
len = patch_retpoline(addr, &insn, bytes);
if (len == insn.length) {
-optimize_nops(bytes, len);
+optimize_nops(addr, bytes, len);
DUMP_BYTES(RETPOLINE, ((u8*)addr), len, "%px: orig: ", addr);
DUMP_BYTES(RETPOLINE, ((u8*)bytes), len, "%px: repl: ", addr);
text_poke_early(addr, bytes, len);
......
......@@ -185,8 +185,7 @@ static void *patch_dest(void *dest, bool direct)
u8 *pad = dest - tsize;
memcpy(insn_buff, skl_call_thunk_template, tsize);
-apply_relocation(insn_buff, tsize, pad,
-skl_call_thunk_template, tsize);
+apply_relocation(insn_buff, pad, tsize, skl_call_thunk_template, tsize);
/* Already patched? */
if (!bcmp(pad, insn_buff, tsize))
......@@ -308,8 +307,7 @@ static bool is_callthunk(void *addr)
pad = (void *)(dest - tmpl_size);
memcpy(insn_buff, skl_call_thunk_template, tmpl_size);
-apply_relocation(insn_buff, tmpl_size, pad,
-skl_call_thunk_template, tmpl_size);
+apply_relocation(insn_buff, pad, tmpl_size, skl_call_thunk_template, tmpl_size);
return !bcmp(pad, insn_buff, tmpl_size);
}
......@@ -327,8 +325,7 @@ int x86_call_depth_emit_accounting(u8 **pprog, void *func, void *ip)
return 0;
memcpy(insn_buff, skl_call_thunk_template, tmpl_size);
-apply_relocation(insn_buff, tmpl_size, ip,
-skl_call_thunk_template, tmpl_size);
+apply_relocation(insn_buff, ip, tmpl_size, skl_call_thunk_template, tmpl_size);
memcpy(*pprog, insn_buff, tmpl_size);
*pprog += tmpl_size;
......
......@@ -114,6 +114,9 @@ static void do_clear_cpu_cap(struct cpuinfo_x86 *c, unsigned int feature)
if (WARN_ON(feature >= MAX_FEATURE_BITS))
return;
+if (boot_cpu_has(feature))
+WARN_ON(alternatives_patched);
clear_feature(c, feature);
/* Collect all features to disable, handling dependencies */
......