Commit 3a9e46f8 authored by David Mosberger

ia64: Based on patch by Jerome Marchand: Add ia64-optimized

	atomic_dec_and_lock().  Actually, this could be the generic
	version for any platform that has cmpxchg().
parent 087eb30d
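The commit message's claim that this could serve as the generic version rests on a single primitive: cmpxchg(). As an illustrative sketch of what cmpxchg(ptr, old, new) means (plain, non-atomic C; the real primitive performs the same read-compare-store sequence as one atomic operation):

	/* Illustrative only -- NOT the real primitive.  cmpxchg(ptr, old, new)
	 * does this read-compare-store sequence atomically and returns the
	 * value it found at *ptr. */
	static inline int cmpxchg_sketch(int *ptr, int old, int new)
	{
		int ret = *ptr;			/* observe the current value */

		if (ret == old)			/* no concurrent update seen... */
			*ptr = new;		/* ...so install the new value */
		return ret;			/* success iff ret == old */
	}

The loop in the new dec_and_lock.c below retries until the return value matches 'old', i.e. until the decrement was installed without interference from another CPU.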
@@ -375,6 +375,11 @@ config COMPAT
 	depends on IA32_SUPPORT
 	default y
 
+config HAVE_DEC_LOCK
+	bool
+	depends on (SMP || PREEMPT)
+	default y
+
 config PERFMON
 	bool "Performance monitor support"
 	help
...
@@ -13,6 +13,7 @@ lib-$(CONFIG_ITANIUM) += copy_page.o copy_user.o memcpy.o
 lib-$(CONFIG_MCKINLEY)	+= copy_page_mck.o memcpy_mck.o
 lib-$(CONFIG_PERFMON)	+= carta_random.o
 lib-$(CONFIG_MD_RAID5)	+= xor.o
+lib-$(CONFIG_HAVE_DEC_LOCK) += dec_and_lock.o
 
 AFLAGS___divdi3.o	=
 AFLAGS___udivdi3.o	= -DUNSIGNED
...
/*
 * Copyright (C) 2003 Jerome Marchand, Bull S.A.
 *	Cleaned up by David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * This file is released under the GPLv2, or at your option any later version.
 *
 * ia64 version of "atomic_dec_and_lock()" using the atomic "cmpxchg" instruction.  This
 * code is an adaptation of the x86 version of "atomic_dec_and_lock()".
 */

#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <asm/atomic.h>

/*
 * Decrement REFCOUNT and if the count reaches zero, acquire the spinlock.  Both of these
 * operations have to be done atomically, so that the count doesn't drop to zero without
 * acquiring the spinlock first.
 */
int
atomic_dec_and_lock (atomic_t *refcount, spinlock_t *lock)
{
	int old, new;

	do {
		old = atomic_read(refcount);
		new = old - 1;
		if (unlikely (old == 1)) {
			/* oops, we may be decrementing to zero, do it the slow way... */
			spin_lock(lock);
			if (atomic_dec_and_test(refcount))
				return 1;
			spin_unlock(lock);
			return 0;
		}
	} while (cmpxchg(&refcount->counter, old, new) != old);
	return 0;
}

EXPORT_SYMBOL(atomic_dec_and_lock);
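For reference, a sketch of the typical caller pattern (the struct, lock, and function names below are hypothetical, not from this commit). On success the lock is held and the caller owns the teardown; on the fast path the lock is never touched:

	#include <linux/list.h>
	#include <linux/slab.h>
	#include <linux/spinlock.h>
	#include <asm/atomic.h>

	/* Hypothetical refcounted object on a global list, for illustration. */
	struct my_obj {
		atomic_t		refcount;
		struct list_head	node;
	};
	static spinlock_t my_obj_lock = SPIN_LOCK_UNLOCKED;

	static void my_obj_put(struct my_obj *obj)
	{
		/* Returns 1 with my_obj_lock held iff the count hit zero,
		 * so the object cannot be looked up again while we free it.
		 * Otherwise the count was merely decremented, lock-free. */
		if (atomic_dec_and_lock(&obj->refcount, &my_obj_lock)) {
			list_del(&obj->node);
			spin_unlock(&my_obj_lock);
			kfree(obj);
		}
	}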