Commit 2c0b8a75 authored by Mathieu Desnoyers, committed by Ingo Molnar

x86: fall back on interrupt disable in cmpxchg8b on 80386 and 80486

On the 386, cmpxchg and cmpxchg_local fall back on cmpxchg_386_u8/16/32,
which disable interrupts around a non-atomic update to mimic the atomic
cmpxchg behavior.

The comment:
/* Poor man's cmpxchg for 386. Unsuitable for SMP */

already present in cmpxchg_386_u32 says a lot about why this cmpxchg
implementation must not be used in an SMP context. However, cmpxchg_local
can safely use this fallback, since it only needs to be atomic with respect
to the local CPU.
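
For reference, the existing 32-bit fallback follows this pattern; the sketch
below mirrors the shape of cmpxchg_386_u32 rather than quoting the in-tree
code verbatim:

#include <linux/types.h>        /* u32 */
#include <linux/irqflags.h>     /* local_irq_save()/local_irq_restore() */

/*
 * Sketch of the 386 fallback pattern: disable local interrupts, do the
 * compare-and-store non-atomically, then restore.  Only atomic with
 * respect to the local CPU, hence "unsuitable for SMP".
 */
unsigned long cmpxchg_386_u32(volatile void *ptr, u32 old, u32 new)
{
        u32 prev;
        unsigned long flags;

        local_irq_save(flags);
        prev = *(u32 *)ptr;
        if (prev == old)
                *(u32 *)ptr = new;
        local_irq_restore(flags);
        return prev;
}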

This patch adds a cmpxchg_486_u64 and uses it as a fallback for cmpxchg64
and cmpxchg64_local on 80386 and 80486.

Q:
But why is it called cmpxchg_486 when the other fallback functions are
called cmpxchg_386?

A:
Because the standard cmpxchg is missing only on the 386, whereas cmpxchg8b
is missing on both the 386 and the 486.

Citing Intel's Instruction set reference:

cmpxchg:
This instruction is not supported on Intel processors earlier than the
Intel486 processors.

cmpxchg8b:
This instruction encoding is not supported on Intel processors earlier
than the Pentium processors.

Q:
What is the reason to have cmpxchg64_local on 32-bit architectures?
Without that need, all this would just be a few simple defines.

A:
cmpxchg64_local on 32-bit architectures takes unsigned long long
parameters, whereas cmpxchg_local only takes unsigned longs. Since cmpxchg8b
can execute an 8-byte cmpxchg atomically on the Pentium and later, it makes
sense to provide flavors of cmpxchg and cmpxchg_local that use this
instruction.
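
As a small illustration (hypothetical variable and function names, assuming
a 32-bit kernel), the difference in operand width looks like this:

#include <linux/types.h>        /* u64 */

static u64 counter;             /* a 64-bit value on a 32-bit kernel */

static void bump_counter(void)
{
        u64 old = counter;

        /*
         * cmpxchg_local() casts its operands to unsigned long (32 bits
         * here), so it cannot carry a full 64-bit update; cmpxchg64_local()
         * keeps the unsigned long long old/new values, using cmpxchg8b on
         * the Pentium and later or the interrupt-disable fallback on the
         * 386/486.
         */
        cmpxchg64_local(&counter, old, old + 1);
}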

Also, on 32-bit architectures lacking a 64-bit atomic cmpxchg, it makes
sense _not_ to define cmpxchg64 even though cmpxchg may still be available.

Moreover, the fallback for cmpxchg8b on i386 (for the 386 and 486) is
cmpxchg_486_u64, which disables interrupts around a non-atomic 64-bit
update; more generally, cmpxchg64_local will be emulated by disabling
interrupts on every architecture where it is not supported atomically.

Therefore, we *could* turn cmpxchg64_local into a cmpxchg_local, but that
would make the 386/486 fallbacks ugly, make its design different from
cmpxchg/cmpxchg64 (which really depend on atomic operations and cannot
be emulated), and require __cmpxchg_local to be expressed as a macro
rather than an inline function so that its parameters would not be fixed to
unsigned long long in every case.

So I think cmpxchg64_local makes sense there, but I am open to
suggestions.

Q:
Are there any callers?

A:
I am actually using it in LTTng, in my timestamping code, to work around
CPUs with asynchronous TSCs: I need to update 64-bit values atomically on
this 32-bit architecture.
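
For illustration only (hypothetical names, not the actual LTTng code), such
a caller could look roughly like the sketch below; it keeps a last-seen
64-bit timestamp monotonic, and assumes preemption is disabled around the
update since cmpxchg64_local() is only atomic with respect to the local CPU:

#include <linux/types.h>        /* u64, s64 */
#include <asm/timex.h>          /* get_cycles() */

/* cmpxchg64_local() comes from the arch headers extended by this patch. */

static u64 last_tsc;            /* one copy per CPU in real use */

static u64 monotonic_tsc(void)
{
        u64 old, new, prev;

        old = last_tsc;         /* a torn read here only costs one retry */
        for (;;) {
                new = get_cycles();
                if ((s64)(new - old) < 0)       /* TSC appears to go backwards */
                        new = old + 1;
                prev = cmpxchg64_local(&last_tsc, old, new);
                if (prev == old)
                        return new;
                old = prev;     /* an interrupt got there first; retry */
        }
}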

Changelog:
- Ran through checkpatch.
Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
Cc: Andi Kleen <ak@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent 5f627f8e
@@ -342,5 +342,22 @@ unsigned long cmpxchg_386_u32(volatile void *ptr, u32 old, u32 new)
 EXPORT_SYMBOL(cmpxchg_386_u32);
 #endif
 
+#ifndef CONFIG_X86_CMPXCHG64
+unsigned long long cmpxchg_486_u64(volatile void *ptr, u64 old, u64 new)
+{
+        u64 prev;
+        unsigned long flags;
+
+        /* Poor man's cmpxchg8b for 386 and 486. Unsuitable for SMP */
+        local_irq_save(flags);
+        prev = *(u64 *)ptr;
+        if (prev == old)
+                *(u64 *)ptr = new;
+        local_irq_restore(flags);
+        return prev;
+}
+EXPORT_SYMBOL(cmpxchg_486_u64);
+#endif
+
 // arch_initcall(intel_cpu_init);
@@ -105,15 +105,24 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
 
 #ifdef CONFIG_X86_CMPXCHG
 #define __HAVE_ARCH_CMPXCHG 1
-#define cmpxchg(ptr,o,n)\
-        ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
-                                        (unsigned long)(n),sizeof(*(ptr))))
-#define sync_cmpxchg(ptr,o,n)\
-        ((__typeof__(*(ptr)))__sync_cmpxchg((ptr),(unsigned long)(o),\
-                                        (unsigned long)(n),sizeof(*(ptr))))
-#define cmpxchg_local(ptr,o,n)\
-        ((__typeof__(*(ptr)))__cmpxchg_local((ptr),(unsigned long)(o),\
-                                        (unsigned long)(n),sizeof(*(ptr))))
+#define cmpxchg(ptr, o, n) \
+        ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \
+                                       (unsigned long)(n), sizeof(*(ptr))))
+#define sync_cmpxchg(ptr, o, n) \
+        ((__typeof__(*(ptr)))__sync_cmpxchg((ptr), (unsigned long)(o), \
+                                            (unsigned long)(n), sizeof(*(ptr))))
+#define cmpxchg_local(ptr, o, n) \
+        ((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \
+                                             (unsigned long)(n), sizeof(*(ptr))))
+#endif
+
+#ifdef CONFIG_X86_CMPXCHG64
+#define cmpxchg64(ptr, o, n) \
+        ((__typeof__(*(ptr)))__cmpxchg64((ptr), (unsigned long long)(o), \
+                                         (unsigned long long)(n)))
+#define cmpxchg64_local(ptr, o, n) \
+        ((__typeof__(*(ptr)))__cmpxchg64_local((ptr), (unsigned long long)(o),\
+                                               (unsigned long long)(n)))
 #endif
 
 static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
@@ -203,6 +212,34 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
         return old;
 }
 
+static inline unsigned long long __cmpxchg64(volatile void *ptr,
+                        unsigned long long old, unsigned long long new)
+{
+        unsigned long long prev;
+        __asm__ __volatile__(LOCK_PREFIX "cmpxchg8b %3"
+                             : "=A"(prev)
+                             : "b"((unsigned long)new),
+                               "c"((unsigned long)(new >> 32)),
+                               "m"(*__xg(ptr)),
+                               "0"(old)
+                             : "memory");
+        return prev;
+}
+
+static inline unsigned long long __cmpxchg64_local(volatile void *ptr,
+                        unsigned long long old, unsigned long long new)
+{
+        unsigned long long prev;
+        __asm__ __volatile__("cmpxchg8b %3"
+                             : "=A"(prev)
+                             : "b"((unsigned long)new),
+                               "c"((unsigned long)(new >> 32)),
+                               "m"(*__xg(ptr)),
+                               "0"(old)
+                             : "memory");
+        return prev;
+}
+
 #ifndef CONFIG_X86_CMPXCHG
 /*
  * Building a kernel capable running on 80386. It may be necessary to
@@ -228,7 +265,7 @@ static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old,
         return old;
 }
 
-#define cmpxchg(ptr,o,n) \
+#define cmpxchg(ptr, o, n) \
 ({ \
         __typeof__(*(ptr)) __ret; \
         if (likely(boot_cpu_data.x86 > 3)) \
@@ -239,7 +276,7 @@ static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old,
                                         (unsigned long)(n), sizeof(*(ptr))); \
         __ret; \
 })
-#define cmpxchg_local(ptr,o,n) \
+#define cmpxchg_local(ptr, o, n) \
 ({ \
         __typeof__(*(ptr)) __ret; \
         if (likely(boot_cpu_data.x86 > 3)) \
@@ -252,38 +289,37 @@ static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old,
 })
 #endif
 
-static inline unsigned long long __cmpxchg64(volatile void *ptr, unsigned long long old,
-                                      unsigned long long new)
-{
-        unsigned long long prev;
-        __asm__ __volatile__(LOCK_PREFIX "cmpxchg8b %3"
-                             : "=A"(prev)
-                             : "b"((unsigned long)new),
-                               "c"((unsigned long)(new >> 32)),
-                               "m"(*__xg(ptr)),
-                               "0"(old)
-                             : "memory");
-        return prev;
-}
-
-static inline unsigned long long __cmpxchg64_local(volatile void *ptr,
-                        unsigned long long old, unsigned long long new)
-{
-        unsigned long long prev;
-        __asm__ __volatile__("cmpxchg8b %3"
-                             : "=A"(prev)
-                             : "b"((unsigned long)new),
-                               "c"((unsigned long)(new >> 32)),
-                               "m"(*__xg(ptr)),
-                               "0"(old)
-                             : "memory");
-        return prev;
-}
-
-#define cmpxchg64(ptr,o,n)\
-        ((__typeof__(*(ptr)))__cmpxchg64((ptr),(unsigned long long)(o),\
-                                        (unsigned long long)(n)))
-#define cmpxchg64_local(ptr,o,n)\
-        ((__typeof__(*(ptr)))__cmpxchg64_local((ptr),(unsigned long long)(o),\
-                                        (unsigned long long)(n)))
+#ifndef CONFIG_X86_CMPXCHG64
+/*
+ * Building a kernel capable running on 80386 and 80486. It may be necessary
+ * to simulate the cmpxchg8b on the 80386 and 80486 CPU.
+ */
+
+extern unsigned long long cmpxchg_486_u64(volatile void *, u64, u64);
+
+#define cmpxchg64(ptr, o, n) \
+({ \
+        __typeof__(*(ptr)) __ret; \
+        if (likely(boot_cpu_data.x86 > 4)) \
+                __ret = __cmpxchg64((ptr), (unsigned long long)(o), \
+                                (unsigned long long)(n)); \
+        else \
+                __ret = cmpxchg_486_u64((ptr), (unsigned long long)(o), \
+                                (unsigned long long)(n)); \
+        __ret; \
+})
+#define cmpxchg64_local(ptr, o, n) \
+({ \
+        __typeof__(*(ptr)) __ret; \
+        if (likely(boot_cpu_data.x86 > 4)) \
+                __ret = __cmpxchg64_local((ptr), (unsigned long long)(o), \
+                                (unsigned long long)(n)); \
+        else \
+                __ret = cmpxchg_486_u64((ptr), (unsigned long long)(o), \
+                                (unsigned long long)(n)); \
+        __ret; \
+})
+#endif
 
 #endif