Commit 8c4206a3 authored by serg@serg.mylan

Merge bk-internal.mysql.com:/home/bk/mysql-5.1

into serg.mylan:/usr/home/serg/Abk/mysql-5.1
parents 5d3aa23d 48c453a4
@@ -1728,7 +1728,7 @@ static void DoPrefix(CODE_STATE *cs, uint _line_)
     struct tm *tm_p;
     if (gettimeofday(&tv, NULL) != -1)
     {
-      if ((tm_p= localtime(&tv.tv_sec)))
+      if ((tm_p= localtime((const time_t *)&tv.tv_sec)))
      {
        (void) fprintf (cs->stack->out_file,
                        /* "%04d-%02d-%02d " */
...
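The only change in this hunk is the (const time_t *) cast: struct timeval.tv_sec is not guaranteed to be declared as time_t on every platform (it is often a plain long), so passing &tv.tv_sec straight to localtime(), which expects const time_t *, can draw a pointer-type warning or error. A minimal standalone illustration of the same call (not from the commit; assumes a POSIX system):

    #include <sys/time.h>
    #include <time.h>
    #include <stdio.h>

    int main(void)
    {
      struct timeval tv;
      struct tm *tm_p;
      if (gettimeofday(&tv, NULL) != -1)
      {
        /* tv.tv_sec may not be declared as time_t everywhere,
           hence the explicit cast before calling localtime() */
        if ((tm_p= localtime((const time_t *)&tv.tv_sec)))
          printf("%04d-%02d-%02d\n", tm_p->tm_year + 1900,
                 tm_p->tm_mon + 1, tm_p->tm_mday);
      }
      return 0;
    }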
@@ -27,137 +27,9 @@
 #endif
 #endif
-#ifdef make_atomic_add_body8
-#ifdef HAVE_INLINE
-#define make_atomic_add(S) \
-static inline uint ## S _my_atomic_add ## S( \
-  my_atomic_ ## S ## _t *a, uint ## S v) \
-{ \
-  make_atomic_add_body ## S; \
-  return v; \
-}
-#define make_atomic_swap(S) \
-static inline uint ## S _my_atomic_swap ## S( \
-  my_atomic_ ## S ## _t *a, uint ## S v) \
-{ \
-  make_atomic_swap_body ## S; \
-  return v; \
-}
-#define make_atomic_cas(S) \
-static inline uint _my_atomic_cas ## S(my_atomic_ ## S ## _t *a, \
-  uint ## S *cmp, uint ## S set) \
-{ \
-  uint8 ret; \
-  make_atomic_cas_body ## S; \
-  return ret; \
-}
-#define make_atomic_load(S) \
-static inline uint ## S _my_atomic_load ## S( \
-  my_atomic_ ## S ## _t *a) \
-{ \
-  uint ## S ret; \
-  make_atomic_load_body ## S; \
-  return ret; \
-}
-#define make_atomic_store(S) \
-static inline void _my_atomic_store ## S( \
-  my_atomic_ ## S ## _t *a, uint ## S v) \
-{ \
-  make_atomic_store_body ## S; \
-}
-#else /* no inline functions */
-#define make_atomic_add(S) \
-extern uint ## S _my_atomic_add ## S( \
-  my_atomic_ ## S ## _t *a, uint ## S v);
-#define make_atomic_swap(S) \
-extern uint ## S _my_atomic_swap ## S( \
-  my_atomic_ ## S ## _t *a, uint ## S v);
-#define make_atomic_cas(S) \
-extern uint _my_atomic_cas ## S(my_atomic_ ## S ## _t *a, \
-  uint ## S *cmp, uint ## S set);
-#define make_atomic_load(S) \
-extern uint ## S _my_atomic_load ## S( \
-  my_atomic_ ## S ## _t *a);
-#define make_atomic_store(S) \
-extern void _my_atomic_store ## S( \
-  my_atomic_ ## S ## _t *a, uint ## S v);
-#endif
-make_atomic_add( 8)
-make_atomic_add(16)
-make_atomic_add(32)
-make_atomic_cas( 8)
-make_atomic_cas(16)
-make_atomic_cas(32)
-make_atomic_load( 8)
-make_atomic_load(16)
-make_atomic_load(32)
-make_atomic_store( 8)
-make_atomic_store(16)
-make_atomic_store(32)
-make_atomic_swap( 8)
-make_atomic_swap(16)
-make_atomic_swap(32)
-#undef make_atomic_add_body8
-#undef make_atomic_cas_body8
-#undef make_atomic_load_body8
-#undef make_atomic_store_body8
-#undef make_atomic_swap_body8
-#undef make_atomic_add_body16
-#undef make_atomic_cas_body16
-#undef make_atomic_load_body16
-#undef make_atomic_store_body16
-#undef make_atomic_swap_body16
-#undef make_atomic_add_body32
-#undef make_atomic_cas_body32
-#undef make_atomic_load_body32
-#undef make_atomic_store_body32
-#undef make_atomic_swap_body32
-#undef make_atomic_add
-#undef make_atomic_cas
-#undef make_atomic_load
-#undef make_atomic_store
-#undef make_atomic_swap
-#define my_atomic_add8(a,v,L) _my_atomic_add8(a,v)
-#define my_atomic_add16(a,v,L) _my_atomic_add16(a,v)
-#define my_atomic_add32(a,v,L) _my_atomic_add32(a,v)
-#define my_atomic_cas8(a,c,v,L) _my_atomic_cas8(a,c,v)
-#define my_atomic_cas16(a,c,v,L) _my_atomic_cas16(a,c,v)
-#define my_atomic_cas32(a,c,v,L) _my_atomic_cas32(a,c,v)
-#define my_atomic_load8(a,L) _my_atomic_load8(a)
-#define my_atomic_load16(a,L) _my_atomic_load16(a)
-#define my_atomic_load32(a,L) _my_atomic_load32(a)
-#define my_atomic_store8(a,v,L) _my_atomic_store8(a,v)
-#define my_atomic_store16(a,v,L) _my_atomic_store16(a,v)
-#define my_atomic_store32(a,v,L) _my_atomic_store32(a,v)
-#define my_atomic_swap8(a,v,L) _my_atomic_swap8(a,v)
-#define my_atomic_swap16(a,v,L) _my_atomic_swap16(a,v)
-#define my_atomic_swap32(a,v,L) _my_atomic_swap32(a,v)
-#define my_atomic_rwlock_t typedef int
+#ifdef make_atomic_add_body
+typedef struct { } my_atomic_rwlock_t;
 #define my_atomic_rwlock_destroy(name)
 #define my_atomic_rwlock_init(name)
 #define my_atomic_rwlock_rdlock(name)
...
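This hunk removes the per-size body macros (make_atomic_add_body8/16/32 and friends) in favor of a single size-parameterized family, so this header only has to test one macro name and can collapse my_atomic_rwlock_t to an empty type with no-op lock macros. A toy sketch of the token-pasting generator pattern the new scheme relies on; the body macro below is the dummy non-atomic add body from this diff, while the int32 typedef and main are illustrative scaffolding:

    #include <stdio.h>
    #include <stdint.h>

    typedef int32_t int32;   /* stand-in for MySQL's own int32 typedef */

    /* dummy, non-atomic add body in the style of the rwlock fallback:
       save the old value, add, leave the old value in v */
    #define make_atomic_add_body(S) int##S sav; sav= *a; *a+= v; v= sav

    #define make_atomic_add(S)                                 \
    static inline int##S my_atomic_add##S(int##S volatile *a,  \
                                          int##S v)            \
    {                                                          \
      make_atomic_add_body(S);                                 \
      return v;                                                \
    }

    make_atomic_add(32)   /* expands to a my_atomic_add32() definition */

    int main(void)
    {
      int32 x= 10, old;
      old= my_atomic_add32(&x, 5);
      printf("old=%d new=%d\n", (int)old, (int)x);   /* old=10 new=15 */
      return 0;
    }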
@@ -16,12 +16,6 @@
 typedef struct {pthread_rwlock_t rw;} my_atomic_rwlock_t;
-#ifdef MY_ATOMIC_EXTRA_DEBUG
-#define CHECK_RW if (rw) if (a->rw) assert(rw == a->rw); else a->rw=rw;
-#else
-#define CHECK_RW
-#endif
 #ifdef MY_ATOMIC_MODE_DUMMY
 /*
   the following can never be enabled by ./configure, one need to put #define in
@@ -36,6 +30,7 @@ typedef struct {pthread_rwlock_t rw;} my_atomic_rwlock_t;
 #define my_atomic_rwlock_wrlock(name)
 #define my_atomic_rwlock_rdunlock(name)
 #define my_atomic_rwlock_wrunlock(name)
+#define MY_ATOMIC_MODE "dummy (non-atomic)"
 #else
 #define my_atomic_rwlock_destroy(name) pthread_rwlock_destroy(& (name)->rw)
 #define my_atomic_rwlock_init(name) pthread_rwlock_init(& (name)->rw, 0)
@@ -43,119 +38,12 @@ typedef struct {pthread_rwlock_t rw;} my_atomic_rwlock_t;
 #define my_atomic_rwlock_wrlock(name) pthread_rwlock_wrlock(& (name)->rw)
 #define my_atomic_rwlock_rdunlock(name) pthread_rwlock_unlock(& (name)->rw)
 #define my_atomic_rwlock_wrunlock(name) pthread_rwlock_unlock(& (name)->rw)
+#define MY_ATOMIC_MODE "rwlocks"
 #endif
-#ifdef HAVE_INLINE
-#define make_atomic_add(S) \
-static inline uint ## S my_atomic_add ## S( \
-  my_atomic_ ## S ## _t *a, uint ## S v, my_atomic_rwlock_t *rw) \
-{ \
-  uint ## S ret; \
-  CHECK_RW; \
-  if (rw) my_atomic_rwlock_wrlock(rw); \
-  ret= a->val; \
-  a->val+= v; \
-  if (rw) my_atomic_rwlock_wrunlock(rw); \
-  return ret; \
-}
-#define make_atomic_swap(S) \
-static inline uint ## S my_atomic_swap ## S( \
-  my_atomic_ ## S ## _t *a, uint ## S v, my_atomic_rwlock_t *rw) \
-{ \
-  uint ## S ret; \
-  CHECK_RW; \
-  if (rw) my_atomic_rwlock_wrlock(rw); \
-  ret= a->val; \
-  a->val= v; \
-  if (rw) my_atomic_rwlock_wrunlock(rw); \
-  return ret; \
-}
-#define make_atomic_cas(S) \
-static inline uint my_atomic_cas ## S(my_atomic_ ## S ## _t *a, \
-  uint ## S *cmp, uint ## S set, my_atomic_rwlock_t *rw) \
-{ \
-  uint ret; \
-  CHECK_RW; \
-  if (rw) my_atomic_rwlock_wrlock(rw); \
-  if (ret= (a->val == *cmp)) a->val= set; else *cmp=a->val; \
-  if (rw) my_atomic_rwlock_wrunlock(rw); \
-  return ret; \
-}
-#define make_atomic_load(S) \
-static inline uint ## S my_atomic_load ## S( \
-  my_atomic_ ## S ## _t *a, my_atomic_rwlock_t *rw) \
-{ \
-  uint ## S ret; \
-  CHECK_RW; \
-  if (rw) my_atomic_rwlock_wrlock(rw); \
-  ret= a->val; \
-  if (rw) my_atomic_rwlock_wrunlock(rw); \
-  return ret; \
-}
-#define make_atomic_store(S) \
-static inline void my_atomic_store ## S( \
-  my_atomic_ ## S ## _t *a, uint ## S v, my_atomic_rwlock_t *rw) \
-{ \
-  CHECK_RW; \
-  if (rw) my_atomic_rwlock_rdlock(rw); \
-  (a)->val= (v); \
-  if (rw) my_atomic_rwlock_rdunlock(rw); \
-}
-#else /* no inline functions */
-#define make_atomic_add(S) \
-extern uint ## S my_atomic_add ## S( \
-  my_atomic_ ## S ## _t *a, uint ## S v, my_atomic_rwlock_t *rw);
-#define make_atomic_swap(S) \
-extern uint ## S my_atomic_swap ## S( \
-  my_atomic_ ## S ## _t *a, uint ## S v, my_atomic_rwlock_t *rw);
-#define make_atomic_cas(S) \
-extern uint my_atomic_cas ## S(my_atomic_ ## S ## _t *a, \
-  uint ## S *cmp, uint ## S set, my_atomic_rwlock_t *rw);
-#define make_atomic_load(S) \
-extern uint ## S my_atomic_load ## S( \
-  my_atomic_ ## S ## _t *a, my_atomic_rwlock_t *rw);
-#define make_atomic_store(S) \
-extern void my_atomic_store ## S( \
-  my_atomic_ ## S ## _t *a, uint ## S v, my_atomic_rwlock_t *rw);
-#endif
-make_atomic_add( 8)
-make_atomic_add(16)
-make_atomic_add(32)
-make_atomic_add(64)
-make_atomic_cas( 8)
-make_atomic_cas(16)
-make_atomic_cas(32)
-make_atomic_cas(64)
-make_atomic_load( 8)
-make_atomic_load(16)
-make_atomic_load(32)
-make_atomic_load(64)
-make_atomic_store( 8)
-make_atomic_store(16)
-make_atomic_store(32)
-make_atomic_store(64)
-make_atomic_swap( 8)
-make_atomic_swap(16)
-make_atomic_swap(32)
-make_atomic_swap(64)
-#undef make_atomic_add
-#undef make_atomic_cas
-#undef make_atomic_load
-#undef make_atomic_store
-#undef make_atomic_swap
-#undef CHECK_RW
+#define make_atomic_add_body(S) int ## S sav; sav= *a; *a+= v; v=sav;
+#define make_atomic_swap_body(S) int ## S sav; sav= *a; *a= v; v=sav;
+#define make_atomic_cas_body(S) if ((ret= (*a == *cmp))) *a= set; else *cmp=*a;
+#define make_atomic_load_body(S) ret= *a;
+#define make_atomic_store_body(S) *a= v;
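In the rwlock fallback the operation bodies are now plain, non-atomic C, and the locking that used to live inside each generated function is left to the caller. make_atomic_cas_body(S) in particular encodes the usual compare-and-swap contract: on a match store the new value, on a mismatch write the current value back through cmp. Spelled out as an ordinary function for S=32 (an illustrative sketch, not code from the commit):

    #include <stdio.h>
    #include <stdint.h>

    typedef int32_t int32;   /* illustrative stand-in */

    /* make_atomic_cas_body(S) as a plain function for S=32 */
    static int my_atomic_cas32_body(int32 volatile *a, int32 *cmp, int32 set)
    {
      int ret;
      if ((ret= (*a == *cmp)))
        *a= set;       /* matched: install the new value  */
      else
        *cmp= *a;      /* failed: report the actual value */
      return ret;
    }

    int main(void)
    {
      int32 a= 7, cmp= 7;
      int r;
      r= my_atomic_cas32_body(&a, &cmp, 9);
      printf("%d a=%d\n", r, (int)a);        /* 1 a=9 */
      r= my_atomic_cas32_body(&a, &cmp, 1);  /* fails: a != cmp */
      printf("%d cmp=%d\n", r, (int)cmp);    /* 0 cmp=9 */
      return 0;
    }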
@@ -16,44 +16,38 @@
 /*
   XXX 64-bit atomic operations can be implemented using
-  cmpxchg8b, if necessary
+  cmpxchg8b, if necessary. Though I've heard that not all 64-bit
+  architectures support double-word (128-bit) cas.
 */
+#define MY_ATOMIC_MODE "gcc-x86" ## LOCK
 /* fix -ansi errors while maintaining readability */
+#ifndef asm
 #define asm __asm__
+#endif
-#define make_atomic_add_body8 \
-  asm volatile (LOCK "xadd %0, %1;" : "+r" (v) , "+m" (a->val))
-#define make_atomic_swap_body8 \
-  asm volatile ("xchg %0, %1;" : "+r" (v) , "+m" (a->val))
-#define make_atomic_cas_body8 \
-  asm volatile (LOCK "cmpxchg %3, %0; setz %2;" \
-                : "+m" (a->val), "+a" (*cmp), "=q" (ret): "r" (set))
+#define make_atomic_add_body(S) \
+  asm volatile (LOCK "xadd %0, %1;" : "+r" (v) , "+m" (*a))
+#define make_atomic_swap_body(S) \
+  asm volatile ("xchg %0, %1;" : "+r" (v) , "+m" (*a))
+#define make_atomic_cas_body(S) \
+  asm volatile (LOCK "cmpxchg %3, %0; setz %2;" \
+                : "+m" (*a), "+a" (*cmp), "=q" (ret): "r" (set))
 #ifdef MY_ATOMIC_MODE_DUMMY
-#define make_atomic_load_body8 ret=a->val
-#define make_atomic_store_body8 a->val=v
+#define make_atomic_load_body(S) ret=*a
+#define make_atomic_store_body(S) *a=v
 #else
 /*
   Actually 32-bit reads/writes are always atomic on x86
   But we add LOCK here anyway to force memory barriers
 */
-#define make_atomic_load_body8 \
-  ret=0; \
-  asm volatile (LOCK "cmpxchg %2, %0" \
-                : "+m" (a->val), "+a" (ret): "r" (ret))
-#define make_atomic_store_body8 \
-  asm volatile ("xchg %0, %1;" : "+m" (a->val) : "r" (v))
+#define make_atomic_load_body(S) \
+  ret=0; \
+  asm volatile (LOCK "cmpxchg %2, %0" \
+                : "+m" (*a), "+a" (ret): "r" (ret))
+#define make_atomic_store_body(S) \
+  asm volatile ("xchg %0, %1;" : "+m" (*a) : "r" (v))
 #endif
-#define make_atomic_add_body16 make_atomic_add_body8
-#define make_atomic_add_body32 make_atomic_add_body8
-#define make_atomic_cas_body16 make_atomic_cas_body8
-#define make_atomic_cas_body32 make_atomic_cas_body8
-#define make_atomic_load_body16 make_atomic_load_body8
-#define make_atomic_load_body32 make_atomic_load_body8
-#define make_atomic_store_body16 make_atomic_store_body8
-#define make_atomic_store_body32 make_atomic_store_body8
-#define make_atomic_swap_body16 make_atomic_swap_body8
-#define make_atomic_swap_body32 make_atomic_swap_body8
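For reference, the xadd-based add body above can be read as the following self-contained GCC inline-asm function (x86/x86-64 only; the LOCK macro from the header is replaced by a hardcoded lock prefix, and the int32 typedef is illustrative scaffolding):

    #include <stdio.h>
    #include <stdint.h>

    typedef int32_t int32;   /* illustrative stand-in */

    static inline int32 atomic_add32(int32 volatile *a, int32 v)
    {
      /* lock xadd: atomically *a += v, with the old *a left in v */
      __asm__ volatile ("lock xadd %0, %1"
                        : "+r" (v), "+m" (*a));
      return v;
    }

    int main(void)
    {
      int32 x= 40, old;
      old= atomic_add32(&x, 2);
      printf("old=%d new=%d\n", (int)old, (int)x);   /* old=40 new=42 */
      return 0;
    }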
@@ -23,63 +23,75 @@
 // (InterlockedCompareExchange, InterlockedCompareExchange16
 // InterlockedExchangeAdd, InterlockedExchange)
+#ifndef _atomic_h_cleanup_
+#define _atomic_h_cleanup_ "atomic/x86-msvc.h"
+#define MY_ATOMIC_MODE "msvc-x86" ## LOCK
-#define make_atomic_add_body(REG) \
+#define make_atomic_add_body(S) \
 _asm { \
-  _asm mov REG, v \
-  _asm LOCK xadd a->val, REG \
-  _asm movzx v, REG \
+  _asm mov reg_ ## S, v \
+  _asm LOCK xadd *a, reg_ ## S \
+  _asm movzx v, reg_ ## S \
 }
-#define make_atomic_cas_body(AREG,REG2) \
+#define make_atomic_cas_body(S) \
 _asm { \
-  _asm mov AREG, *cmp \
-  _asm mov REG2, set \
-  _asm LOCK cmpxchg a->val, REG2 \
-  _asm mov *cmp, AREG \
+  _asm mov areg_ ## S, *cmp \
+  _asm mov reg2_ ## S, set \
+  _asm LOCK cmpxchg *a, reg2_ ## S \
+  _asm mov *cmp, areg_ ## S \
   _asm setz al \
   _asm movzx ret, al \
 }
-#define make_atomic_swap_body(REG) \
+#define make_atomic_swap_body(S) \
 _asm { \
-  _asm mov REG, v \
-  _asm xchg a->val, REG \
-  _asm mov v, REG \
+  _asm mov reg_ ## S, v \
+  _asm xchg *a, reg_ ## S \
+  _asm mov v, reg_ ## S \
 }
 #ifdef MY_ATOMIC_MODE_DUMMY
-#define make_atomic_load_body(AREG,REG) ret=a->val
-#define make_atomic_store_body(REG) a->val=v
+#define make_atomic_load_body(S) ret=*a
+#define make_atomic_store_body(S) *a=v
 #else
 /*
   Actually 32-bit reads/writes are always atomic on x86
   But we add LOCK here anyway to force memory barriers
 */
-#define make_atomic_load_body(AREG,REG2) \
+#define make_atomic_load_body(S) \
 _asm { \
-  _asm mov AREG, 0 \
-  _asm mov REG2, AREG \
-  _asm LOCK cmpxchg a->val, REG2 \
-  _asm mov ret, AREG \
+  _asm mov areg_ ## S, 0 \
+  _asm mov reg2_ ## S, areg_ ## S \
+  _asm LOCK cmpxchg *a, reg2_ ## S \
+  _asm mov ret, areg_ ## S \
 }
-#define make_atomic_store_body(REG) \
+#define make_atomic_store_body(S) \
 _asm { \
-  _asm mov REG, v \
-  _asm xchg a->val, REG \
+  _asm mov reg_ ## S, v \
+  _asm xchg *a, reg_ ## S \
 }
 #endif
-#define make_atomic_add_body8 make_atomic_add_body(al)
-#define make_atomic_add_body16 make_atomic_add_body(ax)
-#define make_atomic_add_body32 make_atomic_add_body(eax)
-#define make_atomic_cas_body8 make_atomic_cas_body(al, bl)
-#define make_atomic_cas_body16 make_atomic_cas_body(ax, bx)
-#define make_atomic_cas_body32 make_atomic_cas_body(eax, ebx)
-#define make_atomic_load_body8 make_atomic_load_body(al, bl)
-#define make_atomic_load_body16 make_atomic_load_body(ax, bx)
-#define make_atomic_load_body32 make_atomic_load_body(eax, ebx)
-#define make_atomic_store_body8 make_atomic_store_body(al)
-#define make_atomic_store_body16 make_atomic_store_body(ax)
-#define make_atomic_store_body32 make_atomic_store_body(eax)
-#define make_atomic_swap_body8 make_atomic_swap_body(al)
-#define make_atomic_swap_body16 make_atomic_swap_body(ax)
-#define make_atomic_swap_body32 make_atomic_swap_body(eax)
+#define reg_8 al
+#define reg_16 ax
+#define reg_32 eax
+#define areg_8 al
+#define areg_16 ax
+#define areg_32 eax
+#define reg2_8 bl
+#define reg2_16 bx
+#define reg2_32 ebx
+
+#else /* cleanup */
+
+#undef reg_8
+#undef reg_16
+#undef reg_32
+#undef areg_8
+#undef areg_16
+#undef areg_32
+#undef reg2_8
+#undef reg2_16
+#undef reg2_32
+#endif
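The _atomic_h_cleanup_ guard introduced here lets my_atomic.h include this header a second time after all functions have been generated; the second pass takes the #else /* cleanup */ branch and #undefs the reg_*/areg_*/reg2_* helper macros so they cannot leak into user code. The pattern in miniature (file and macro names below are invented for illustration, not from the MySQL sources):

    /* cleanup_demo.h -- a header that cleans up after itself on re-inclusion */
    #ifndef _cleanup_demo_
    #define _cleanup_demo_ "cleanup_demo.h"
    #define REG_32 eax              /* helper used while generating code */
    #else                           /* second inclusion: the cleanup pass */
    #undef REG_32
    #endif

    /* consumer.c */
    #include "cleanup_demo.h"       /* first pass: REG_32 is defined */
    /* ... expand code-generating macros that use REG_32 here ... */
    #ifdef _cleanup_demo_
    #include _cleanup_demo_         /* second pass hits the #else branch */
    #undef _cleanup_demo_
    #endif
    /* REG_32 (and the guard) no longer leak past this point */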
@@ -14,21 +14,9 @@
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-#ifndef atomic_rwlock_init
+#ifndef my_atomic_rwlock_init
-#ifdef MY_ATOMIC_EXTRA_DEBUG
-#ifndef MY_ATOMIC_MODE_RWLOCKS
-#error MY_ATOMIC_EXTRA_DEBUG can be only used with MY_ATOMIC_MODE_RWLOCKS
-#endif
-#define LOCK_PTR void *rw;
-#else
-#define LOCK_PTR
-#endif
-typedef volatile struct {uint8 val; LOCK_PTR} my_atomic_8_t;
-typedef volatile struct {uint16 val; LOCK_PTR} my_atomic_16_t;
-typedef volatile struct {uint32 val; LOCK_PTR} my_atomic_32_t;
-typedef volatile struct {uint64 val; LOCK_PTR} my_atomic_64_t;
+#define intptr void *
 #ifndef MY_ATOMIC_MODE_RWLOCKS
 #include "atomic/nolock.h"
@@ -38,6 +26,103 @@ typedef volatile struct {uint64 val; LOCK_PTR} my_atomic_64_t;
 #include "atomic/rwlock.h"
 #endif
+#ifdef HAVE_INLINE
+#define make_atomic_add(S) \
+static inline int ## S my_atomic_add ## S( \
+  int ## S volatile *a, int ## S v) \
+{ \
+  make_atomic_add_body(S); \
+  return v; \
+}
+#define make_atomic_swap(S) \
+static inline int ## S my_atomic_swap ## S( \
+  int ## S volatile *a, int ## S v) \
+{ \
+  make_atomic_swap_body(S); \
+  return v; \
+}
+#define make_atomic_cas(S) \
+static inline int my_atomic_cas ## S(int ## S volatile *a, \
+  int ## S *cmp, int ## S set) \
+{ \
+  int8 ret; \
+  make_atomic_cas_body(S); \
+  return ret; \
+}
+#define make_atomic_load(S) \
+static inline int ## S my_atomic_load ## S(int ## S volatile *a) \
+{ \
+  int ## S ret; \
+  make_atomic_load_body(S); \
+  return ret; \
+}
+#define make_atomic_store(S) \
+static inline void my_atomic_store ## S( \
+  int ## S volatile *a, int ## S v) \
+{ \
+  make_atomic_store_body(S); \
+}
+#else /* no inline functions */
+#define make_atomic_add(S) \
+extern int ## S my_atomic_add ## S(int ## S volatile *a, int ## S v);
+#define make_atomic_swap(S) \
+extern int ## S my_atomic_swap ## S(int ## S volatile *a, int ## S v);
+#define make_atomic_cas(S) \
+extern int my_atomic_cas ## S(int ## S volatile *a, int ## S *cmp, int ## S set);
+#define make_atomic_load(S) \
+extern int ## S my_atomic_load ## S(int ## S volatile *a);
+#define make_atomic_store(S) \
+extern void my_atomic_store ## S(int ## S volatile *a, int ## S v);
+#endif
+make_atomic_add( 8)
+make_atomic_add(16)
+make_atomic_add(32)
+make_atomic_cas( 8)
+make_atomic_cas(16)
+make_atomic_cas(32)
+make_atomic_cas(ptr)
+make_atomic_load( 8)
+make_atomic_load(16)
+make_atomic_load(32)
+make_atomic_load(ptr)
+make_atomic_store( 8)
+make_atomic_store(16)
+make_atomic_store(32)
+make_atomic_store(ptr)
+make_atomic_swap( 8)
+make_atomic_swap(16)
+make_atomic_swap(32)
+make_atomic_swap(ptr)
+#undef make_atomic_add
+#undef make_atomic_cas
+#undef make_atomic_load
+#undef make_atomic_store
+#undef make_atomic_swap
+#undef intaptr
+#ifdef _atomic_h_cleanup_
+#include _atomic_h_cleanup_
+#undef _atomic_h_cleanup_
+#endif
 #define MY_ATOMIC_OK 0
 #define MY_ATOMIC_NOT_1CPU 1
 extern int my_atomic_initialize();
...
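Net effect on the API: operands are now plain int32 volatile * values rather than wrapper structs, the trailing rwlock argument is gone, and callers bracket each call with the my_atomic_rwlock_* macros, which compile to nothing unless the rwlock fallback is in use. A caller-side sketch in the style of the unittest changes further below (the bump helper is illustrative; rwl would need my_atomic_rwlock_init before use):

    #include <my_global.h>
    #include <my_atomic.h>

    static int32 a32;
    static my_atomic_rwlock_t rwl;   /* empty no-op type in native mode */

    static int32 bump(int32 v)
    {
      int32 old;
      my_atomic_rwlock_wrlock(&rwl);   /* no-op unless rwlock fallback */
      old= my_atomic_add32(&a32, v);   /* returns the previous value   */
      my_atomic_rwlock_wrunlock(&rwl);
      return old;
    }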
@@ -14,8 +14,8 @@
   tests are skipped for an unknown reason.
 */
 int main() {
-  ok(1, NULL);
-  ok(1, NULL);
-  ok(1, NULL);
+  ok(1, " ");
+  ok(1, " ");
+  ok(1, " ");
   return exit_status();
 }
@@ -2,10 +2,10 @@
 AM_CPPFLAGS = @ZLIB_INCLUDES@ -I$(top_builddir)/include
 AM_CPPFLAGS += -I$(top_srcdir)/include -I$(top_srcdir)/unittest/mytap
-AM_LDFLAGS = -L$(top_builddir)/unittest/mytap -L$(top_builddir)/mysys
-AM_LDFLAGS += -L$(top_builddir)/strings -L$(top_builddir)/dbug
-LDADD = -lmytap -lmysys -ldbug -lmystrings
+LDADD = $(top_builddir)/unittest/mytap/libmytap.a \
+        $(top_builddir)/mysys/libmysys.a \
+        $(top_builddir)/dbug/libdbug.a \
+        $(top_builddir)/strings/libmystrings.a
 noinst_PROGRAMS = bitmap-t base64-t my_atomic-t
@@ -14,13 +14,12 @@
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-#include <tap.h>
 #include <my_global.h>
+#include <tap.h>
 #include <my_sys.h>
 #include <my_atomic.h>
-my_atomic_32_t a32,b32,c32;
+int32 a32,b32,c32;
 my_atomic_rwlock_t rwl;
 pthread_attr_t thr_attr;
@@ -36,8 +35,13 @@ pthread_handler_t test_atomic_add_handler(void *arg)
   for (x=((int)(&m)); m ; m--)
   {
     x=x*m+0x87654321;
-    my_atomic_add32(&a32, x, &rwl);
-    my_atomic_add32(&a32, -x, &rwl);
+    my_atomic_rwlock_wrlock(&rwl);
+    my_atomic_add32(&a32, x);
+    my_atomic_rwlock_wrunlock(&rwl);
+    my_atomic_rwlock_wrlock(&rwl);
+    my_atomic_add32(&a32, -x);
+    my_atomic_rwlock_wrunlock(&rwl);
   }
   pthread_mutex_lock(&mutex);
   N--;
@@ -57,17 +61,33 @@ pthread_handler_t test_atomic_add_handler(void *arg)
 pthread_handler_t test_atomic_swap_handler(void *arg)
 {
   int m=*(int *)arg;
-  uint32 x=my_atomic_add32(&b32, 1, &rwl);
-  my_atomic_add32(&a32, x, &rwl);
+  int32 x;
+  my_atomic_rwlock_wrlock(&rwl);
+  x=my_atomic_add32(&b32, 1);
+  my_atomic_rwlock_wrunlock(&rwl);
+  my_atomic_rwlock_wrlock(&rwl);
+  my_atomic_add32(&a32, x);
+  my_atomic_rwlock_wrunlock(&rwl);
   for (; m ; m--)
-    x=my_atomic_swap32(&c32, x,&rwl);
+  {
+    my_atomic_rwlock_wrlock(&rwl);
+    x=my_atomic_swap32(&c32, x);
+    my_atomic_rwlock_wrunlock(&rwl);
+  }
   if (!x)
-    x=my_atomic_swap32(&c32, x,&rwl);
+  {
+    my_atomic_rwlock_wrlock(&rwl);
+    x=my_atomic_swap32(&c32, x);
+    my_atomic_rwlock_wrunlock(&rwl);
+  }
-  my_atomic_add32(&a32, -x, &rwl);
+  my_atomic_rwlock_wrlock(&rwl);
+  my_atomic_add32(&a32, -x);
+  my_atomic_rwlock_wrunlock(&rwl);
   pthread_mutex_lock(&mutex);
   N--;
@@ -82,14 +102,25 @@ pthread_handler_t test_atomic_swap_handler(void *arg)
 */
 pthread_handler_t test_atomic_cas_handler(void *arg)
 {
-  int m=*(int *)arg;
-  int32 x;
+  int m=*(int *)arg, ok;
+  int32 x,y;
   for (x=((int)(&m)); m ; m--)
   {
-    uint32 y=my_atomic_load32(&a32, &rwl);
+    my_atomic_rwlock_wrlock(&rwl);
+    y=my_atomic_load32(&a32);
+    my_atomic_rwlock_wrunlock(&rwl);
     x=x*m+0x87654321;
-    while (!my_atomic_cas32(&a32, &y, y+x, &rwl)) ;
-    while (!my_atomic_cas32(&a32, &y, y-x, &rwl)) ;
+    do {
+      my_atomic_rwlock_wrlock(&rwl);
+      ok=my_atomic_cas32(&a32, &y, y+x);
+      my_atomic_rwlock_wrunlock(&rwl);
+    } while (!ok);
+    do {
+      my_atomic_rwlock_wrlock(&rwl);
+      ok=my_atomic_cas32(&a32, &y, y-x);
+      my_atomic_rwlock_wrunlock(&rwl);
+    } while (!ok);
   }
   pthread_mutex_lock(&mutex);
   N--;
@@ -103,9 +134,9 @@ void test_atomic(const char *test, pthread_handler handler, int n, int m)
   pthread_t t;
   ulonglong now=my_getsystime();
-  my_atomic_store32(&a32, 0, &rwl);
-  my_atomic_store32(&b32, 0, &rwl);
-  my_atomic_store32(&c32, 0, &rwl);
+  a32= 0;
+  b32= 0;
+  c32= 0;
   diag("Testing %s with %d threads, %d iterations... ", test, n, m);
   for (N=n ; n ; n--)
@@ -116,8 +147,7 @@ void test_atomic(const char *test, pthread_handler handler, int n, int m)
     pthread_cond_wait(&cond, &mutex);
   pthread_mutex_unlock(&mutex);
   now=my_getsystime()-now;
-  ok(my_atomic_load32(&a32, &rwl) == 0,
-     "tested %s in %g secs", test, ((double)now)/1e7);
+  ok(a32 == 0, "tested %s in %g secs", test, ((double)now)/1e7);
 }
 int main()
...
@@ -199,8 +199,8 @@ int exit_status() {
   if (g_test.plan != g_test.last)
   {
-    diag("%d tests planned but only %d executed",
-         g_test.plan, g_test.last);
+    diag("%d tests planned but%s %d executed",
+         g_test.plan, (g_test.plan > g_test.last ? " only" : ""), g_test.last);
     return EXIT_FAILURE;
   }
...