Commit 8aba0a3d authored by Andi Kleen, committed by Linus Torvalds

[PATCH] Runtime memory barrier patching

This implements automatic code patching of memory barriers based
on the CPU capabilities. Normally lock ; addl $0,(%esp) barriers
are used, but these are a bit slow on the Pentium 4.

Linus proposed this a few weeks ago after the support for SSE1/SSE2
barriers was introduced. I now got around to implementing it.

The main advantage is that it allows distributors to ship fewer kernel
binaries while still getting fast kernels. In particular it avoids the
need for a special Pentium 4 kernel.

The patching code is quite generic and could be used to patch
other instructions (such as prefetches or other specific critical
instructions) as well.
Thanks to Rusty's in-kernel module loader it also works seamlessly for modules.

The patching is done before the other CPUs are started, to avoid potential
errata with self-modifying code on SMP systems. It makes no
attempt to automatically handle asymmetric systems (a secondary
CPU having fewer capabilities than the boot CPU). In that
case, just boot with "noreplacement".
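
For illustration, a sketch of what the patching does to a single mb()
call site (the byte values follow from the generic barrier, the SSE2
replacement and the nop table added below; addresses are irrelevant):

	/*
	 * Before patching (as compiled into every kernel):
	 *   f0 83 44 24 00 00    lock; addl $0x0,0x0(%esp)       6 bytes
	 *
	 * After the boot-time pass on a CPU with SSE2 (e.g. a Pentium 4):
	 *   0f ae f0             mfence                          3 bytes
	 *   8d 76 00             lea 0x0(%esi),%esi  (nop pad;    3 bytes
	 *                        66 66 90 on K7/K8 builds)
	 */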
parent 5a49258a
@@ -104,9 +104,22 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
	return -ENOEXEC;
}

extern void apply_alternatives(void *start, void *end);

int module_finalize(const Elf_Ehdr *hdr,
		    const Elf_Shdr *sechdrs,
		    struct module *me)
{
	const Elf_Shdr *s;
	char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;

	/* look for .altinstructions to patch */
	for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
		void *seg;

		if (strcmp(".altinstructions", secstrings + s->sh_name))
			continue;
		seg = (void *)s->sh_addr;
		apply_alternatives(seg, seg + s->sh_size);
	}
	return 0;
}
@@ -795,6 +795,63 @@ static void __init register_memory(unsigned long max_low_pfn)
	pci_mem_start = low_mem_size;
}

/* Replace instructions with better alternatives for this CPU type.
   This runs before SMP is initialized to avoid SMP problems with
   self-modifying code. This implies that asymmetric systems where
   APs have fewer capabilities than the boot processor are not handled.
   In this case boot with "noreplacement". */
void __init apply_alternatives(void *start, void *end)
{
	struct alt_instr *a;
	int diff, i, k;

	for (a = start; a < end;
	     a = (void *)ALIGN((unsigned long)(a + 1) + a->instrlen, 4)) {
		if (!boot_cpu_has(a->cpuid))
			continue;
		BUG_ON(a->replacementlen > a->instrlen);
		memcpy(a->instr, a->replacement, a->replacementlen);
		diff = a->instrlen - a->replacementlen;
		/* Pad the rest of the original instruction with nops. */
		for (i = a->replacementlen; diff > 0; diff -= k, i += k) {
			/* nops[k] is a k-byte nop, k = 1..ARRAY_SIZE(nops)-1 */
			static const char *nops[] = {
				0,
				"\x90",
#if CONFIG_MK7 || CONFIG_MK8
				"\x66\x90",
				"\x66\x66\x90",
				"\x66\x66\x66\x90",
#else
				"\x89\xf6",
				"\x8d\x76\x00",
				"\x8d\x74\x26\x00",
#endif
			};
			k = min_t(int, diff, ARRAY_SIZE(nops) - 1);
			memcpy(a->instr + i, nops[k], k);
		}
	}
}
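
To make the padding arithmetic concrete, a small worked example (the
9-byte case is hypothetical; the barriers in this patch only ever need
3 bytes of padding):

	/*
	 * For the barriers below, diff = 6 - 3 = 3, so the loop runs once
	 * and copies the single 3-byte nop right after the fence. A
	 * hypothetical diff of 9 would be filled in three passes, largest
	 * table entry first: 4 + 4 + 1 bytes of nops.
	 */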

static int no_replacement __initdata = 0;

void __init alternative_instructions(void)
{
	extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
	if (no_replacement)
		return;
	apply_alternatives(__alt_instructions, __alt_instructions_end);
}

static int __init noreplacement_setup(char *s)
{
	no_replacement = 1;
	return 0;
}
__setup("noreplacement", noreplacement_setup);
void __init setup_arch(char **cmdline_p)
{
	unsigned long max_low_pfn;
@@ -81,6 +81,10 @@ SECTIONS
  __con_initcall_start = .;
  .con_initcall.init : { *(.con_initcall.init) }
  __con_initcall_end = .;
  . = ALIGN(4);
  __alt_instructions = .;
  .altinstructions : { *(.altinstructions) }
  __alt_instructions_end = .;
  . = ALIGN(4096);
  __initramfs_start = .;
  .init.ramfs : { *(.init.ramfs) }
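
The two symbols above bound everything the linker collects into
.altinstructions. Pieced together from the hunks in this patch, the
boot-time flow is roughly:

	/*
	 * check_bugs()
	 *   -> alternative_instructions()       (skipped with "noreplacement")
	 *        -> apply_alternatives(__alt_instructions,
	 *                              __alt_instructions_end)
	 *
	 * Modules get the same treatment from module_finalize(), which hands
	 * the module's own .altinstructions section to apply_alternatives().
	 */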
@@ -200,6 +200,8 @@ static void __init check_config(void)
#endif
}

extern void alternative_instructions(void);

static void __init check_bugs(void)
{
	identify_cpu(&boot_cpu_data);
@@ -212,4 +214,5 @@ static void __init check_bugs(void)
	check_hlt();
	check_popad();
	system_utsname.machine[1] = '0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
	alternative_instructions();
}
@@ -4,6 +4,7 @@
#include <linux/config.h>
#include <linux/kernel.h>
#include <asm/segment.h>
#include <asm/cpufeature.h>
#include <linux/bitops.h> /* for LOCK_PREFIX */
#ifdef __KERNEL__
@@ -276,6 +277,37 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
/* Compiling for a 386 proper. Is it worth implementing via cli/sti? */
#endif

struct alt_instr {
	u8 *instr;		/* original instruction */
	u8 cpuid;		/* cpuid bit set for replacement */
	u8 instrlen;		/* length of original instruction */
	u8 replacementlen;	/* length of new instruction, <= instrlen */
	u8 replacement[0];	/* new instruction */
};
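
For illustration, here is roughly what one record looks like for an mb()
call site under the macro below (the address is invented; the lengths and
replacement bytes follow from the lock;addl barrier and mfence):

	/*
	 * instr          = 0xc0112340         address of the lock;addl site
	 * cpuid          = X86_FEATURE_XMM2   required CPUID feature bit
	 * instrlen       = 6                  lock; addl $0,0(%esp)
	 * replacementlen = 3                  mfence
	 * replacement[]  = 0x0f 0xae 0xf0     mfence opcode bytes
	 */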

/*
 * Alternative instructions for different CPU types or capabilities.
 *
 * This allows the use of optimized instructions even on generic binary
 * kernels.
 *
 * The length of oldinstr must be greater than or equal to the length of
 * newinstr; it can be padded with nops as needed.
 *
 * For non-barrier-like inlines please define new variants
 * without volatile and memory clobber.
 */
#define alternative(oldinstr, newinstr, feature)			\
	asm volatile ("661:\n\t" oldinstr "\n662:\n"			\
		      ".section .altinstructions,\"a\"\n"		\
		      " .align 4\n"					\
		      " .long 661b\n"		/* label */		\
		      " .byte %c0\n"		/* feature bit */	\
		      " .byte 662b-661b\n"	/* sourcelen */		\
		      " .byte 664f-663f\n"	/* replacementlen */	\
		      "663:\n\t" newinstr "\n664:\n" /* replacement */	\
		      ".previous" :: "i" (feature) : "memory")

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
@@ -294,13 +326,15 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 * nop for these.
 */
#ifdef CONFIG_X86_SSE2
#define mb() asm volatile("mfence" ::: "memory")
#define rmb() asm volatile("lfence" ::: "memory")
#else
#define mb() __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory")
#define rmb() mb()
#endif

/*
 * Actually only lfence would be needed for mb() because all stores done
 * by the kernel should already be ordered. But keep a full barrier for now.
 */
#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)

/**
 * read_barrier_depends - Flush all pending reads that subsequent reads
 * depend on.
@@ -356,7 +390,9 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
#define read_barrier_depends() do { } while(0)
#ifdef CONFIG_X86_OOSTORE
#define wmb() __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory")
/* Actually there are no OOO-store-capable CPUs for now that do SSE,
   but make it a possibility already. */
#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
#else
#define wmb() __asm__ __volatile__ ("": : :"memory")
#endif