Commit 973bd993 authored by Martin Schwidefsky, committed by Linus Torvalds

[PATCH] s390: atomic primitives

      Hugh Dickins <hugh@veritas.com>

Fix the broken atomic_cmpxchg primitive.  Add atomic_sub_and_test,
atomic64_sub_return, atomic64_sub_and_test, atomic64_cmpxchg,
atomic64_add_unless and atomic64_inc_not_zero.  Replace old style
atomic_compare_and_swap by atomic_cmpxchg.  Shorten the whole header by
defining most primitives with the two inline functions atomic_add_return and
atomic_sub_return.

In addition this patch contains the s390 related fixes of Hugh's "mm: fill
arch atomic64 gaps" patch.
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 8d93c700
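
The key behavioural difference driving every hunk below: the old atomic_compare_and_swap() returned a condition code (0 when the swap succeeded, 1 when it failed), while atomic_cmpxchg() follows the common kernel convention of returning the counter's previous value, so callers test the return against the expected old value. A minimal user-space sketch of the new convention, with GCC's __sync_val_compare_and_swap() standing in for the s390 CS instruction; the harness around it is illustrative, not part of the patch:

	#include <assert.h>
	#include <stdio.h>

	/* User-space stand-in for atomic_cmpxchg(): returns the *old* value.
	 * __sync_val_compare_and_swap compiles to CS on s390. */
	static int my_cmpxchg(int *p, int old, int new)
	{
		return __sync_val_compare_and_swap(p, old, new);
	}

	int main(void)
	{
		int counter = -1;

		/* Success: counter held -1, the swap happens, -1 comes back. */
		assert(my_cmpxchg(&counter, -1, 5) == -1);
		assert(counter == 5);

		/* Failure: counter is now 5, not -1; nothing is written and
		 * the current value (5) is returned, so "!= -1" detects it. */
		assert(my_cmpxchg(&counter, -1, 7) != -1);
		assert(counter == 5);

		puts("cmpxchg convention demo passed");
		return 0;
	}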
@@ -85,7 +85,7 @@ kexec_halt_all_cpus(void *kernel_image)
 	pfault_fini();
 #endif

-	if (atomic_compare_and_swap(-1, smp_processor_id(), &cpuid))
+	if (atomic_cmpxchg(&cpuid, -1, smp_processor_id()) != -1)
 		signal_processor(smp_processor_id(), sigp_stop);

 	/* Wait for all other cpus to enter stopped state */
...
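
This first hunk is the single-winner election used by all the shutdown paths: cpuid starts at -1, the first CPU to swap in its own id proceeds, and every later CPU sees a return value other than -1 and stops itself. A hedged pthread analogue of the same election, with thread indices standing in for CPU ids (all names are illustrative):

	#include <pthread.h>
	#include <stdio.h>

	static int cpuid = -1;	/* mirrors: static atomic_t cpuid = ATOMIC_INIT(-1); */

	static void *halt_path(void *arg)
	{
		int self = (int)(long)arg;

		/* First thread to swap -1 -> self wins and does the work;
		 * everyone else "stops" (here: just returns). */
		if (__sync_val_compare_and_swap(&cpuid, -1, self) != -1)
			return NULL;		/* lost the election */
		printf("thread %d performs the shutdown work\n", self);
		return NULL;
	}

	int main(void)
	{
		pthread_t t[4];

		for (long i = 0; i < 4; i++)
			pthread_create(&t[i], NULL, halt_path, (void *)i);
		for (int i = 0; i < 4; i++)
			pthread_join(t[i], NULL);
		return 0;
	}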
@@ -263,7 +263,7 @@ static void do_machine_restart(void * __unused)
 	int cpu;
 	static atomic_t cpuid = ATOMIC_INIT(-1);

-	if (atomic_compare_and_swap(-1, smp_processor_id(), &cpuid))
+	if (atomic_cmpxchg(&cpuid, -1, smp_processor_id()) != -1)
 		signal_processor(smp_processor_id(), sigp_stop);

 	/* Wait for all other cpus to enter stopped state */
@@ -313,7 +313,7 @@ static void do_machine_halt(void * __unused)
 {
 	static atomic_t cpuid = ATOMIC_INIT(-1);

-	if (atomic_compare_and_swap(-1, smp_processor_id(), &cpuid) == 0) {
+	if (atomic_cmpxchg(&cpuid, -1, smp_processor_id()) == -1) {
 		smp_send_stop();
 		if (MACHINE_IS_VM && strlen(vmhalt_cmd) > 0)
 			cpcmd(vmhalt_cmd, NULL, 0, NULL);
@@ -332,7 +332,7 @@ static void do_machine_power_off(void * __unused)
 {
 	static atomic_t cpuid = ATOMIC_INIT(-1);

-	if (atomic_compare_and_swap(-1, smp_processor_id(), &cpuid) == 0) {
+	if (atomic_cmpxchg(&cpuid, -1, smp_processor_id()) == -1) {
 		smp_send_stop();
 		if (MACHINE_IS_VM && strlen(vmpoff_cmd) > 0)
 			cpcmd(vmpoff_cmd, NULL, 0, NULL);
...
@@ -7,7 +7,7 @@
  * Bugreports.to..: <Linux390@de.ibm.com>
  * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2001
  *
- * $Revision: 1.167 $
+ * $Revision: 1.169 $
  */

 #include <linux/config.h>
@@ -1323,7 +1323,7 @@ void
 dasd_schedule_bh(struct dasd_device * device)
 {
 	/* Protect against rescheduling. */
-	if (atomic_compare_and_swap (0, 1, &device->tasklet_scheduled))
+	if (atomic_cmpxchg (&device->tasklet_scheduled, 0, 1) != 0)
 		return;
 	dasd_get_device(device);
 	tasklet_hi_schedule(&device->tasklet);
...
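
dasd_schedule_bh() — like tapeblock_trigger_requeue() further down — uses the 0 -> 1 swap as a schedule-once gate, so concurrent callers can never queue the tasklet twice. A user-space sketch of that gate under the same convention (function names and the reset in work_fn() are illustrative; the real driver clears the flag from its tasklet):

	#include <stdio.h>

	static int scheduled;	/* 0 = idle, 1 = work already queued */

	/* Returns 1 if this caller actually queued the work, 0 otherwise. */
	static int trigger(void)
	{
		if (__sync_val_compare_and_swap(&scheduled, 0, 1) != 0)
			return 0;	/* already scheduled, nothing to do */
		/* ... here the driver would call tasklet_hi_schedule() ... */
		return 1;
	}

	/* The work handler re-opens the gate before processing. */
	static void work_fn(void)
	{
		__atomic_store_n(&scheduled, 0, __ATOMIC_RELEASE);
		/* ... process queued requests ... */
	}

	int main(void)
	{
		printf("first trigger:  %d\n", trigger());	/* 1: queued */
		printf("second trigger: %d\n", trigger());	/* 0: gate closed */
		work_fn();
		printf("after work_fn:  %d\n", trigger());	/* 1: queued again */
		return 0;
	}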
@@ -32,7 +32,7 @@ do_load_quiesce_psw(void * __unused)
 	psw_t quiesce_psw;
 	int cpu;

-	if (atomic_compare_and_swap(-1, smp_processor_id(), &cpuid))
+	if (atomic_cmpxchg(&cpuid, -1, smp_processor_id()) != -1)
 		signal_processor(smp_processor_id(), sigp_stop);
 	/* Wait for all other cpus to enter stopped state */
 	for_each_online_cpu(cpu) {
...
@@ -65,7 +65,7 @@ static void
 tapeblock_trigger_requeue(struct tape_device *device)
 {
 	/* Protect against rescheduling. */
-	if (atomic_compare_and_swap(0, 1, &device->blk_data.requeue_scheduled))
+	if (atomic_cmpxchg(&device->blk_data.requeue_scheduled, 0, 1) != 0)
 		return;
 	schedule_work(&device->blk_data.requeue_task);
 }
...
 /*
  * drivers/s390/cio/ccwgroup.c
  *  bus driver for ccwgroup
- *   $Revision: 1.32 $
+ *   $Revision: 1.33 $
  *
  *  Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
  *                     IBM Corporation
@@ -263,7 +263,7 @@ ccwgroup_set_online(struct ccwgroup_device *gdev)
 	struct ccwgroup_driver *gdrv;
 	int ret;

-	if (atomic_compare_and_swap(0, 1, &gdev->onoff))
+	if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0)
 		return -EAGAIN;
 	if (gdev->state == CCWGROUP_ONLINE) {
 		ret = 0;
@@ -289,7 +289,7 @@ ccwgroup_set_offline(struct ccwgroup_device *gdev)
 	struct ccwgroup_driver *gdrv;
 	int ret;

-	if (atomic_compare_and_swap(0, 1, &gdev->onoff))
+	if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0)
 		return -EAGAIN;
 	if (gdev->state == CCWGROUP_OFFLINE) {
 		ret = 0;
...
 /*
  *  drivers/s390/cio/device.c
  *  bus driver for ccw devices
- *   $Revision: 1.131 $
+ *   $Revision: 1.137 $
  *
  *    Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
  *                       IBM Corporation
@@ -374,7 +374,7 @@ online_store (struct device *dev, struct device_attribute *attr, const char *buf
 	int i, force, ret;
 	char *tmp;

-	if (atomic_compare_and_swap(0, 1, &cdev->private->onoff))
+	if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0)
 		return -EAGAIN;
 	if (cdev->drv && !try_module_get(cdev->drv->owner)) {
...
 /*
- * $Id: iucv.c,v 1.45 2005/04/26 22:59:06 braunu Exp $
+ * $Id: iucv.c,v 1.47 2005/11/21 11:35:22 mschwide Exp $
  *
  * IUCV network driver
  *
@@ -29,7 +29,7 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  *
- * RELEASE-TAG: IUCV lowlevel driver $Revision: 1.45 $
+ * RELEASE-TAG: IUCV lowlevel driver $Revision: 1.47 $
  *
  */
@@ -355,7 +355,7 @@ do { \
 static void
 iucv_banner(void)
 {
-	char vbuf[] = "$Revision: 1.45 $";
+	char vbuf[] = "$Revision: 1.47 $";
 	char *version = vbuf;

 	if ((version = strchr(version, ':'))) {
@@ -477,7 +477,7 @@ grab_param(void)
 		ptr++;
 		if (ptr >= iucv_param_pool + PARAM_POOL_SIZE)
 			ptr = iucv_param_pool;
-	} while (atomic_compare_and_swap(0, 1, &ptr->in_use));
+	} while (atomic_cmpxchg(&ptr->in_use, 0, 1) != 0);
 	hint = ptr - iucv_param_pool;

 	memset(&ptr->param, 0, sizeof(ptr->param));
...
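
grab_param() claims a slot in a fixed pool: the scan advances from a hint, and the cmpxchg itself is the claim, so two CPUs can never own the same slot. A compressed user-space sketch of the scan-and-claim loop (pool size, types, and names are illustrative; like the original, it spins if every slot is taken):

	#include <stdio.h>
	#include <string.h>

	#define POOL_SIZE 8

	struct param { int in_use; char data[32]; };

	static struct param pool[POOL_SIZE];
	static int hint;	/* index of the last successful claim */

	static struct param *grab_slot(void)
	{
		struct param *ptr = &pool[hint];

		do {
			ptr++;
			if (ptr >= pool + POOL_SIZE)
				ptr = pool;	/* wrap around the pool */
		} while (__sync_val_compare_and_swap(&ptr->in_use, 0, 1) != 0);
		hint = ptr - pool;
		memset(ptr->data, 0, sizeof(ptr->data));
		return ptr;
	}

	int main(void)
	{
		struct param *a = grab_slot(), *b = grab_slot();

		printf("claimed slots %td and %td\n", a - pool, b - pool);
		return 0;
	}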
@@ -1396,7 +1396,7 @@ qeth_idx_activate_get_answer(struct qeth_channel *channel,
 	channel->ccw.cda = (__u32) __pa(iob->data);
 	wait_event(card->wait_q,
-		   atomic_compare_and_swap(0,1,&channel->irq_pending) == 0);
+		   atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0);
 	QETH_DBF_TEXT(setup, 6, "noirqpnd");
 	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
 	rc = ccw_device_start(channel->ccwdev,
@@ -1463,7 +1463,7 @@ qeth_idx_activate_channel(struct qeth_channel *channel,
 	memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &temp, 2);
 	wait_event(card->wait_q,
-		   atomic_compare_and_swap(0,1,&channel->irq_pending) == 0);
+		   atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0);
 	QETH_DBF_TEXT(setup, 6, "noirqpnd");
 	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
 	rc = ccw_device_start(channel->ccwdev,
@@ -1616,7 +1616,7 @@ qeth_issue_next_read(struct qeth_card *card)
 	}
 	qeth_setup_ccw(&card->read, iob->data, QETH_BUFSIZE);
 	wait_event(card->wait_q,
-		   atomic_compare_and_swap(0,1,&card->read.irq_pending) == 0);
+		   atomic_cmpxchg(&card->read.irq_pending, 0, 1) == 0);
 	QETH_DBF_TEXT(trace, 6, "noirqpnd");
 	rc = ccw_device_start(card->read.ccwdev, &card->read.ccw,
 			      (addr_t) iob, 0, 0);
@@ -1882,7 +1882,7 @@ qeth_send_control_data(struct qeth_card *card, int len,
 	spin_unlock_irqrestore(&card->lock, flags);
 	QETH_DBF_HEX(control, 2, iob->data, QETH_DBF_CONTROL_LEN);
 	wait_event(card->wait_q,
-		   atomic_compare_and_swap(0,1,&card->write.irq_pending) == 0);
+		   atomic_cmpxchg(&card->write.irq_pending, 0, 1) == 0);
 	qeth_prepare_control_data(card, len, iob);
 	if (IS_IPA(iob->data))
 		timer.expires = jiffies + QETH_IPA_TIMEOUT;
@@ -1924,7 +1924,7 @@ qeth_osn_send_control_data(struct qeth_card *card, int len,
 	QETH_DBF_TEXT(trace, 5, "osndctrd");
 	wait_event(card->wait_q,
-		   atomic_compare_and_swap(0,1,&card->write.irq_pending) == 0);
+		   atomic_cmpxchg(&card->write.irq_pending, 0, 1) == 0);
 	qeth_prepare_control_data(card, len, iob);
 	QETH_DBF_TEXT(trace, 6, "osnoirqp");
 	spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags);
@@ -4236,9 +4236,8 @@ qeth_do_send_packet_fast(struct qeth_card *card, struct qeth_qdio_out_q *queue,
 	QETH_DBF_TEXT(trace, 6, "dosndpfa");

 	/* spin until we get the queue ... */
-	while (atomic_compare_and_swap(QETH_OUT_Q_UNLOCKED,
-				       QETH_OUT_Q_LOCKED,
-				       &queue->state));
+	while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED,
+			      QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED);
 	/* ... now we've got the queue */
 	index = queue->next_buf_to_fill;
 	buffer = &queue->bufs[queue->next_buf_to_fill];
@@ -4292,9 +4291,8 @@ qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
 	QETH_DBF_TEXT(trace, 6, "dosndpkt");

 	/* spin until we get the queue ... */
-	while (atomic_compare_and_swap(QETH_OUT_Q_UNLOCKED,
-				       QETH_OUT_Q_LOCKED,
-				       &queue->state));
+	while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED,
+			      QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED);
 	start_index = queue->next_buf_to_fill;
 	buffer = &queue->bufs[queue->next_buf_to_fill];
 	/*
...
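
The two qeth transmit hunks build a small spinlock out of cmpxchg: flip the queue state UNLOCKED -> LOCKED and keep spinning while the displaced value shows someone else holds the queue. The same shape in user-space C (the constants and the unlock helper are assumptions for the sketch, not qeth API):

	enum { Q_UNLOCKED = 0, Q_LOCKED = 1 };

	static int queue_state = Q_UNLOCKED;

	static void queue_lock(void)
	{
		/* spin until we get the queue ... */
		while (__sync_val_compare_and_swap(&queue_state, Q_UNLOCKED,
						   Q_LOCKED) != Q_UNLOCKED)
			;	/* busy-wait, like the kernel code */
		/* ... now we've got the queue */
	}

	static void queue_unlock(void)
	{
		__atomic_store_n(&queue_state, Q_UNLOCKED, __ATOMIC_RELEASE);
	}

	int main(void)
	{
		queue_lock();
		/* ... fill output buffers, as qeth_do_send_packet() does ... */
		queue_unlock();
		return 0;
	}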
@@ -5,7 +5,7 @@
  *  include/asm-s390/atomic.h
  *
  *  S390 version
- *    Copyright (C) 1999-2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ *    Copyright (C) 1999-2005 IBM Deutschland Entwicklung GmbH, IBM Corporation
  *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
  *               Denis Joseph Barrow,
  *               Arnd Bergmann (arndb@de.ibm.com)
@@ -45,59 +45,57 @@ typedef struct {
 #define atomic_read(v)          ((v)->counter)
 #define atomic_set(v,i)         (((v)->counter) = (i))

-static __inline__ void atomic_add(int i, atomic_t * v)
-{
-	__CS_LOOP(v, i, "ar");
-}
 static __inline__ int atomic_add_return(int i, atomic_t * v)
 {
 	return __CS_LOOP(v, i, "ar");
 }
-static __inline__ int atomic_add_negative(int i, atomic_t * v)
-{
-	return __CS_LOOP(v, i, "ar") < 0;
-}
-static __inline__ void atomic_sub(int i, atomic_t * v)
-{
-	__CS_LOOP(v, i, "sr");
-}
+#define atomic_add(_i, _v)		atomic_add_return(_i, _v)
+#define atomic_add_negative(_i, _v)	(atomic_add_return(_i, _v) < 0)
+#define atomic_inc(_v)			atomic_add_return(1, _v)
+#define atomic_inc_return(_v)		atomic_add_return(1, _v)
+#define atomic_inc_and_test(_v)		(atomic_add_return(1, _v) == 0)
+
 static __inline__ int atomic_sub_return(int i, atomic_t * v)
 {
 	return __CS_LOOP(v, i, "sr");
 }
-static __inline__ void atomic_inc(volatile atomic_t * v)
-{
-	__CS_LOOP(v, 1, "ar");
-}
-static __inline__ int atomic_inc_return(volatile atomic_t * v)
-{
-	return __CS_LOOP(v, 1, "ar");
-}
-static __inline__ int atomic_inc_and_test(volatile atomic_t * v)
-{
-	return __CS_LOOP(v, 1, "ar") == 0;
-}
-static __inline__ void atomic_dec(volatile atomic_t * v)
-{
-	__CS_LOOP(v, 1, "sr");
-}
-static __inline__ int atomic_dec_return(volatile atomic_t * v)
-{
-	return __CS_LOOP(v, 1, "sr");
-}
-static __inline__ int atomic_dec_and_test(volatile atomic_t * v)
-{
-	return __CS_LOOP(v, 1, "sr") == 0;
-}
+#define atomic_sub(_i, _v)		atomic_sub_return(_i, _v)
+#define atomic_sub_and_test(_i, _v)	(atomic_sub_return(_i, _v) == 0)
+#define atomic_dec(_v)			atomic_sub_return(1, _v)
+#define atomic_dec_return(_v)		atomic_sub_return(1, _v)
+#define atomic_dec_and_test(_v)		(atomic_sub_return(1, _v) == 0)
+
 static __inline__ void atomic_clear_mask(unsigned long mask, atomic_t * v)
 {
 	__CS_LOOP(v, ~mask, "nr");
 }
 static __inline__ void atomic_set_mask(unsigned long mask, atomic_t * v)
 {
 	__CS_LOOP(v, mask, "or");
 }
+
+static __inline__ int atomic_cmpxchg(atomic_t *v, int old, int new)
+{
+	__asm__ __volatile__("  cs   %0,%3,0(%2)\n"
+			     : "+d" (old), "=m" (v->counter)
+			     : "a" (v), "d" (new), "m" (v->counter)
+			     : "cc", "memory" );
+	return old;
+}
+
+static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+{
+	int c, old;
+
+	c = atomic_read(v);
+	while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c)
+		c = old;
+	return c != u;
+}
+
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+
 #undef __CS_LOOP

 #ifdef __s390x__
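
The new atomic_add_unless() (previously an ad-hoc macro at the bottom of the header) is the standard optimistic CAS retry loop: re-read on every lost race, stop when the counter equals the forbidden value u, and report whether the add happened. Its behaviour, checked in a user-space sketch with a stand-in cmpxchg (the test harness is illustrative):

	#include <assert.h>

	static int cmpxchg_int(int *p, int old, int new)
	{
		return __sync_val_compare_and_swap(p, old, new);
	}

	/* Same logic as the new atomic_add_unless(): add a, unless the
	 * value is u.  Returns nonzero iff the add was performed. */
	static int add_unless(int *v, int a, int u)
	{
		int c, old;

		c = *v;	/* plain read, mirroring atomic_read() */
		while (c != u && (old = cmpxchg_int(v, c, c + a)) != c)
			c = old;	/* lost a race: retry on the fresh value */
		return c != u;
	}

	int main(void)
	{
		int v = 3;

		assert(add_unless(&v, 1, 0) && v == 4);	/* normal increment */
		v = 0;
		assert(!add_unless(&v, 1, 0) && v == 0);	/* pinned at u */
		/* atomic_inc_not_zero(v) is just add_unless(v, 1, 0): it
		 * refuses to resurrect a counter that already hit zero. */
		return 0;
	}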
@@ -123,92 +121,61 @@ typedef struct {
 #define atomic64_read(v)          ((v)->counter)
 #define atomic64_set(v,i)         (((v)->counter) = (i))

-static __inline__ void atomic64_add(long long i, atomic64_t * v)
-{
-	__CSG_LOOP(v, i, "agr");
-}
 static __inline__ long long atomic64_add_return(long long i, atomic64_t * v)
 {
 	return __CSG_LOOP(v, i, "agr");
 }
-static __inline__ long long atomic64_add_negative(long long i, atomic64_t * v)
-{
-	return __CSG_LOOP(v, i, "agr") < 0;
-}
-static __inline__ void atomic64_sub(long long i, atomic64_t * v)
-{
-	__CSG_LOOP(v, i, "sgr");
-}
-static __inline__ void atomic64_inc(volatile atomic64_t * v)
-{
-	__CSG_LOOP(v, 1, "agr");
-}
-static __inline__ long long atomic64_inc_return(volatile atomic64_t * v)
-{
-	return __CSG_LOOP(v, 1, "agr");
-}
-static __inline__ long long atomic64_inc_and_test(volatile atomic64_t * v)
-{
-	return __CSG_LOOP(v, 1, "agr") == 0;
-}
-static __inline__ void atomic64_dec(volatile atomic64_t * v)
-{
-	__CSG_LOOP(v, 1, "sgr");
-}
-static __inline__ long long atomic64_dec_return(volatile atomic64_t * v)
-{
-	return __CSG_LOOP(v, 1, "sgr");
-}
-static __inline__ long long atomic64_dec_and_test(volatile atomic64_t * v)
+#define atomic64_add(_i, _v)		atomic64_add_return(_i, _v)
+#define atomic64_add_negative(_i, _v)	(atomic64_add_return(_i, _v) < 0)
+#define atomic64_inc(_v)		atomic64_add_return(1, _v)
+#define atomic64_inc_return(_v)		atomic64_add_return(1, _v)
+#define atomic64_inc_and_test(_v)	(atomic64_add_return(1, _v) == 0)
+
+static __inline__ long long atomic64_sub_return(long long i, atomic64_t * v)
 {
-	return __CSG_LOOP(v, 1, "sgr") == 0;
+	return __CSG_LOOP(v, i, "sgr");
 }
+#define atomic64_sub(_i, _v)		atomic64_sub_return(_i, _v)
+#define atomic64_sub_and_test(_i, _v)	(atomic64_sub_return(_i, _v) == 0)
+#define atomic64_dec(_v)		atomic64_sub_return(1, _v)
+#define atomic64_dec_return(_v)		atomic64_sub_return(1, _v)
+#define atomic64_dec_and_test(_v)	(atomic64_sub_return(1, _v) == 0)
+
 static __inline__ void atomic64_clear_mask(unsigned long mask, atomic64_t * v)
 {
 	__CSG_LOOP(v, ~mask, "ngr");
 }
 static __inline__ void atomic64_set_mask(unsigned long mask, atomic64_t * v)
 {
 	__CSG_LOOP(v, mask, "ogr");
 }
-#undef __CSG_LOOP
-#endif

-/*
-  returns 0  if expected_oldval==value in *v ( swap was successful )
-  returns 1  if unsuccessful.
+static __inline__ long long atomic64_cmpxchg(atomic64_t *v,
+					     long long old, long long new)
+{
+	__asm__ __volatile__("  csg  %0,%3,0(%2)\n"
+			     : "+d" (old), "=m" (v->counter)
+			     : "a" (v), "d" (new), "m" (v->counter)
+			     : "cc", "memory" );
+	return old;
+}

-  This is non-portable, use bitops or spinlocks instead!
-*/
-static __inline__ int
-atomic_compare_and_swap(int expected_oldval,int new_val,atomic_t *v)
+static __inline__ int atomic64_add_unless(atomic64_t *v,
+					  long long a, long long u)
 {
-	int retval;
-
-	__asm__ __volatile__(
-		"  lr   %0,%3\n"
-		"  cs   %0,%4,0(%2)\n"
-		"  ipm  %0\n"
-		"  srl  %0,28\n"
-		"0:"
-		: "=&d" (retval), "=m" (v->counter)
-		: "a" (v), "d" (expected_oldval) , "d" (new_val),
-		  "m" (v->counter) : "cc", "memory" );
-	return retval;
+	long long c, old;
+
+	c = atomic64_read(v);
+	while (c != u && (old = atomic64_cmpxchg(v, c, c + a)) != c)
+		c = old;
+	return c != u;
 }

-#define atomic_cmpxchg(v, o, n) (atomic_compare_and_swap((o), (n), &((v)->counter)))
-
-#define atomic_add_unless(v, a, u) \
-({ \
-	int c, old; \
-	c = atomic_read(v); \
-	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
-		c = old; \
-	c != (u); \
-})
-#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
+
+#undef __CSG_LOOP
+#endif

 #define smp_mb__before_atomic_dec()	smp_mb()
 #define smp_mb__after_atomic_dec()	smp_mb()
...
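
Finally, the reason the header shrinks: every arithmetic primitive reduces to one compare-and-swap loop. __CS_LOOP/__CSG_LOOP load the counter, apply the operation, and retry CS/CSG until no other CPU intervened, after which the *_return value falls out for free. A user-space rendering of that loop, with __sync_val_compare_and_swap() standing in for CS (all names are illustrative):

	/* The generic loop behind atomic_add_return()/atomic_sub_return():
	 * retry until the counter was not changed underneath us. */
	static int cs_loop_add(int *v, int i)
	{
		int old, new;

		do {
			old = *v;	/* snapshot (atomic_read) */
			new = old + i;	/* the "ar"/"sr" step of __CS_LOOP */
		} while (__sync_val_compare_and_swap(v, old, new) != old);
		return new;	/* the *_return variants hand back the result */
	}

	/* Every other primitive is then a one-liner, as in the new header: */
	#define my_inc(v)		cs_loop_add((v), 1)
	#define my_dec_and_test(v)	(cs_loop_add((v), -1) == 0)

	int main(void)
	{
		int counter = 2;

		my_inc(&counter);			/* 3 */
		while (!my_dec_and_test(&counter))	/* 2, 1, 0 */
			;
		return counter;				/* exits with 0 */
	}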