#include <stdint.h>

#ifdef _MSC_VER
#include <intrin.h>	// _InterlockedExchangeAdd(), _InterlockedExchangeAdd64()
#endif

// Default the target architecture from the compiler's data model when the
// build system hasn't chosen one.
#if ! (defined(MARCH_i686) || defined(MARCH_x86_64))
#if defined(__LP64__) || defined(_LP64)
#define MARCH_x86_64 1
#else
#define MARCH_i686 1
#endif
#endif
#ifdef MARCH_i686
typedef volatile uint32_t cf_atomic32;
typedef volatile uint32_t cf_atomic_p;
typedef volatile uint32_t cf_atomic_int;
#define SIZEOF_ATOMIC_INT 4
#define HAS_ATOMIC_32 1
#endif
#ifdef MARCH_x86_64
typedef volatile uint64_t cf_atomic64;
typedef volatile uint32_t cf_atomic32;
typedef volatile uint64_t cf_atomic_p;
typedef volatile uint64_t cf_atomic_int;
#define SIZEOF_ATOMIC_INT 8
#define HAS_ATOMIC_32 1
#define HAS_ATOMIC_64 1
#endif
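// Illustrative note (not from the original header): only MARCH_x86_64 defines
// HAS_ATOMIC_64, so portable callers should gate 64-bit usage on it. A minimal
// sketch, with g_byte_count as a hypothetical counter:
//
//   #ifdef HAS_ATOMIC_64
//   cf_atomic64 g_byte_count = 0;
//   #else
//   cf_atomic32 g_byte_count = 0;
//   #endif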
#ifdef HAS_ATOMIC_64

#define cf_atomic64_get(a) (a)
#define cf_atomic64_set(a, b) (*(a) = (b))
static inline int64_t
cf_atomic64_add(cf_atomic64 *a, int64_t b)
{
	int64_t i = b;

#ifndef _MSC_VER
	// lock xaddq atomically adds b to *a and leaves the old value in b.
	__asm__ __volatile__ ("lock; xaddq %0, %1" : "+r" (b), "+m" (*a) : : "memory");
#else
	// MSVC intrinsic; likewise returns the pre-add value.
	b = _InterlockedExchangeAdd64((LONGLONG *)a, b);
#endif

	return b + i; // post-add value
}
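// Usage sketch (illustrative, not from the original header): both paths leave
// the pre-add value in b, so cf_atomic64_add() returns the post-add value.
//
//   cf_atomic64 n = 5;
//   int64_t after = cf_atomic64_add(&n, 3);   // after == 8, n == 8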
#define cf_atomic64_sub(a, b) (cf_atomic64_add((a), (0 - (b))))
#define cf_atomic64_incr(a) (cf_atomic64_add((a), 1))
#define cf_atomic64_decr(a) (cf_atomic64_add((a), -1))
// Compare-and-swap: if *a == b, store x into *a; returns the value observed
// in *a.
static inline int64_t
cf_atomic64_cas(cf_atomic64 *a, int64_t b, int64_t x)
{
	int64_t p;

	__asm__ __volatile__ ("lock; cmpxchgq %1,%2" : "=a"(p) : "q"(x), "m"(*(a)), "0"(b) : "memory");

	return p;
}
// Type-generic cas, built on a GCC statement expression and __typeof__;
// evaluates to the value observed in *_a.
#define cf_atomic64_cas_m(_a, _b, _x) \
({ __typeof__(_b) __b = _b; \
   __asm__ __volatile__ ("lock; cmpxchgq %1,%2" : "=a"(__b) : "q"(_x), "m"(*(_a)), "0"(_b) : "memory"); \
   __b; \
})
// Fetch-and-swap: atomically store *b into *a; returns the old value of *a.
static inline int64_t
cf_atomic64_fas(cf_atomic64 *a, cf_atomic64 *b)
{
	int64_t p;

	__asm__ __volatile__ ("lock; xchgq %0,%1" : "=r"(p) : "m"(*(a)), "0"(*(b)) : "memory");

	return p;
}
#define cf_atomic64_fas_m(_a, _b) \
({ __typeof__(_b) __b; \
   __asm__ __volatile__ ("lock; xchgq %0,%1" : "=r"(__b) : "m"(*(_a)), "0"(_b)); \
   __b; \
})
// Add x to *a unless the current value equals b; returns nonzero if the add
// was performed.
static inline int64_t
cf_atomic64_addunless(cf_atomic64 *a, int64_t b, int64_t x)
{
	int64_t prior, cur;

	// Get the current value of the atomic integer.
	cur = cf_atomic64_get(*a);

	for ( ; ; ) {
		// Nothing to do if the current value equals the criterion, b.
		if (cur == b) {
			break;
		}

		// Attempt a compare-and-swap, which returns the value of *a
		// prior to the operation.
		prior = cf_atomic64_cas(a, cur, cur + x);

		// If prior and cur are equal, the swap succeeded.
		if (prior == cur) {
			break;
		}

		// Another thread changed *a first - retry with the value it left.
		cur = prior;
	}

	return cur != b;
}
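// Usage sketch (illustrative): a common pattern is "decrement unless already
// zero", with counter as a hypothetical cf_atomic64.
//
//   if (cf_atomic64_addunless(&counter, 0, -1)) {
//       // counter was nonzero and has been decremented
//   }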
#endif // HAS_ATOMIC_64

#define cf_atomic32_get(a) (a)
#define cf_atomic32_set(a, b) (*(a) = (b))
static inline int32_t
cf_atomic32_add(cf_atomic32 *a, int32_t b)
{
	int32_t i = b;

#ifndef _MSC_VER
	// lock xadd atomically adds b to *a and leaves the old value in b.
	__asm__ __volatile__ ("lock; xadd %0, %1" : "+r" (b), "+m" (*a) : : "memory");
#else
	b = _InterlockedExchangeAdd((volatile long *)a, b);
#endif

	return b + i; // post-add value
}
#define cf_atomic32_sub(a, b) (cf_atomic32_add((a), (0 - (b))))
#define cf_atomic32_incr(a) (cf_atomic32_add((a), 1))
#define cf_atomic32_decr(a) (cf_atomic32_add((a), -1))
// Compare-and-swap: if *a == b, store x into *a; returns the value observed
// in *a.
static inline int32_t
cf_atomic32_cas(cf_atomic32 *a, int32_t b, int32_t x)
{
	int32_t p;

	__asm__ __volatile__ ("lock; cmpxchg %1,%2" : "=a"(p) : "q"(x), "m"(*(a)), "0"(b) : "memory");

	return p;
}
#define cf_atomic32_cas_m(_a, _b, _x) \
({ __typeof__(_b) __b = _b; \
   __asm__ __volatile__ ("lock; cmpxchg %1,%2" : "=a"(__b) : "q"(_x), "m"(*(_a)), "0"(_b) : "memory"); \
   __b; \
})
// Fetch-and-swap: atomically store *b into *a; returns the old value of *a.
static inline int32_t
cf_atomic32_fas(cf_atomic32 *a, cf_atomic32 *b)
{
	int32_t p;

	__asm__ __volatile__ ("lock; xchg %0,%1" : "=r"(p) : "m"(*(a)), "0"(*(b)) : "memory");

	return p;
}
#define cf_atomic32_fas_m(_a, _b) \
({ __typeof__(_b) __b; \
   __asm__ __volatile__ ("lock; xchg %0,%1" : "=r"(__b) : "m"(*(_a)), "0"(_b)); \
   __b; \
})
// Add x to *a unless the current value equals b; returns nonzero if the add
// was performed.
static inline int32_t
cf_atomic32_addunless(cf_atomic32 *a, int32_t b, int32_t x)
{
	int32_t prior, cur;

	cur = cf_atomic32_get(*a);

	for ( ; ; ) {
		if (cur == b) {
			break;
		}

		prior = cf_atomic32_cas(a, cur, cur + x);

		if (prior == cur) {
			break;
		}

		cur = prior;
	}

	return cur != b;
}
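// Usage sketch (illustrative; obj->ref_count is a hypothetical field): the
// 32-bit variant supports the classic "take a reference only if still live"
// idiom, since the add is refused when the count already equals 0.
//
//   if (! cf_atomic32_addunless(&obj->ref_count, 0, 1)) {
//       // object already fully released - do not touch it
//   }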
// Map the natural-word cf_atomic_p / cf_atomic_int operations onto the
// matching fixed-width implementation.

#ifdef MARCH_i686

#define cf_atomic_p_get(_a) cf_atomic32_get(_a)
#define cf_atomic_p_set(_a, _b) cf_atomic32_set(_a, _b)
#define cf_atomic_p_add(_a, _b) cf_atomic32_add(_a, _b)
#define cf_atomic_p_incr(_a) cf_atomic32_add((_a), 1)
#define cf_atomic_p_decr(_a) cf_atomic32_add((_a), -1)
#define cf_atomic_p_cas(_a, _b, _x) cf_atomic32_cas(_a, _b, _x)
#define cf_atomic_p_cas_m(_a, _b, _x) cf_atomic32_cas_m(_a, _b, _x)
#define cf_atomic_p_fas(_a, _b) cf_atomic32_fas(_a, _b)
#define cf_atomic_p_fas_m(_a, _b) cf_atomic32_fas_m(_a, _b)
#define cf_atomic_p_addunless(_a, _b, _x) cf_atomic32_addunless(_a, _b, _x)
#define cf_atomic_int_get(_a) cf_atomic32_get(_a)
#define cf_atomic_int_set(_a, _b) cf_atomic32_set(_a, _b)
#define cf_atomic_int_add(_a, _b) cf_atomic32_add(_a, _b)
#define cf_atomic_int_sub(_a, _b) cf_atomic32_sub(_a, _b)
#define cf_atomic_int_incr(_a) cf_atomic32_add((_a), 1)
#define cf_atomic_int_decr(_a) cf_atomic32_add((_a), -1)
#define cf_atomic_int_cas(_a, _b, _x) cf_atomic32_cas(_a, _b, _x)
#define cf_atomic_int_cas_m(_a, _b, _x) cf_atomic32_cas_m(_a, _b, _x)
#define cf_atomic_int_fas(_a, _b) cf_atomic32_fas(_a, _b)
#define cf_atomic_int_fas_m(_a, _b) cf_atomic32_fas_m(_a, _b)
#define cf_atomic_int_addunless(_a, _b, _x) cf_atomic32_addunless(_a, _b, _x)

#endif // MARCH_i686
#ifdef MARCH_x86_64

#define cf_atomic_p_get(_a) cf_atomic64_get(_a)
#define cf_atomic_p_set(_a, _b) cf_atomic64_set(_a, _b)
#define cf_atomic_p_add(_a, _b) cf_atomic64_add(_a, _b)
#define cf_atomic_p_incr(_a) cf_atomic64_add((_a), 1)
#define cf_atomic_p_decr(_a) cf_atomic64_add((_a), -1)
#define cf_atomic_p_cas(_a, _b, _x) cf_atomic64_cas(_a, _b, _x)
#define cf_atomic_p_cas_m(_a, _b, _x) cf_atomic64_cas_m(_a, _b, _x)
#define cf_atomic_p_fas(_a, _b) cf_atomic64_fas(_a, _b)
#define cf_atomic_p_fas_m(_a, _b) cf_atomic64_fas_m(_a, _b)
#define cf_atomic_p_addunless(_a, _b, _x) cf_atomic64_addunless(_a, _b, _x)
#define cf_atomic_int_get(_a) cf_atomic64_get(_a)
#define cf_atomic_int_set(_a, _b) cf_atomic64_set(_a, _b)
#define cf_atomic_int_add(_a, _b) cf_atomic64_add(_a, _b)
#define cf_atomic_int_sub(_a, _b) cf_atomic64_sub(_a, _b)
#define cf_atomic_int_incr(_a) cf_atomic64_add((_a), 1)
#define cf_atomic_int_decr(_a) cf_atomic64_add((_a), -1)
#define cf_atomic_int_cas(_a, _b, _x) cf_atomic64_cas(_a, _b, _x)
#define cf_atomic_int_cas_m(_a, _b, _x) cf_atomic64_cas_m(_a, _b, _x)
#define cf_atomic_int_fas(_a, _b) cf_atomic64_fas(_a, _b)
#define cf_atomic_int_fas_m(_a, _b) cf_atomic64_fas_m(_a, _b)
#define cf_atomic_int_addunless(_a, _b, _x) cf_atomic64_addunless(_a, _b, _x)

#endif // MARCH_x86_64
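// Illustrative note (not from the original header): cf_atomic_p and
// cf_atomic_int are natural-word types - 32-bit under MARCH_i686, 64-bit
// under MARCH_x86_64 - so callers get pointer-sized atomics without caring
// which. A minimal sketch, where new_cfg and struct cfg are hypothetical:
//
//   cf_atomic_int g_requests = 0;
//   cf_atomic_int_incr(&g_requests);
//
//   cf_atomic_p g_cfg = 0;
//   cf_atomic_p_set(&g_cfg, (uintptr_t)new_cfg);
//   struct cfg *c = (struct cfg *)cf_atomic_p_get(g_cfg);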
// Full memory barrier. NOTE: _ReadWriteBarrier() is a compiler-only barrier
// on MSVC.
#ifndef _MSC_VER
#define smb_mb() asm volatile("mfence" ::: "memory")
#else
#define smb_mb() _ReadWriteBarrier()
#endif
// Full barrier: a locked read-modify-write on the stack serializes loads and
// stores, and works even on CPUs that predate mfence (pre-SSE2).
#define CF_MEMORY_BARRIER() __asm__ __volatile__ ("lock; addl $0,0(%%esp)" : : : "memory")

// The read side reuses the full barrier.
#define CF_MEMORY_BARRIER_READ() CF_MEMORY_BARRIER()

// x86 does not reorder stores with other stores, so the write barrier only
// needs to stop compiler reordering.
#define CF_MEMORY_BARRIER_WRITE() __asm__ __volatile__ ("" : : : "memory")
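// Usage sketch (illustrative; g_data, g_ready, compute() and use() are
// hypothetical): the classic publish pattern - write the data, fence, then
// set the flag, so a reader that observes the flag also observes the data.
//
//   // writer
//   g_data = compute();
//   CF_MEMORY_BARRIER_WRITE();
//   g_ready = 1;
//
//   // reader
//   if (g_ready) {
//       CF_MEMORY_BARRIER_READ();
//       use(g_data);
//   }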