hce-node application 1.4.3
HCE Hierarchical Cluster Engine node application
cf_atomic.h
/*
 * Citrusleaf Foundation
 * include/atomic.h - atomic memory operations
 *
 * Copyright 2008 by Citrusleaf. All rights reserved.
 * THIS IS UNPUBLISHED PROPRIETARY SOURCE CODE. THE COPYRIGHT NOTICE
 * ABOVE DOES NOT EVIDENCE ANY ACTUAL OR INTENDED PUBLICATION.
 */
#pragma once

#include <stdint.h>

#ifdef CF_WINDOWS
#include <intrin.h>
#include <WinSock2.h> // LONGLONG
#endif

/* SYNOPSIS
 * Atomic memory operations
 * Memory barriers
 */

#if ! (defined(MARCH_i686) || defined(MARCH_x86_64))
#if defined(__LP64__) || defined(_LP64)
#define MARCH_x86_64 1
#else
#define MARCH_i686 1
#endif
#endif

/* Atomic memory operations */

/* cf_atomic32, cf_atomic64
 * Implementations of atomic 32-bit and 64-bit data types
 * NB: some older compilers may exhibit aliasing bugs that require these
 * definitions to be wrapped in a structure */

#ifdef MARCH_i686
typedef volatile uint32_t cf_atomic32;
typedef volatile uint32_t cf_atomic_p;
typedef volatile uint32_t cf_atomic_int;
#define SIZEOF_ATOMIC_INT 4
typedef uint32_t cf_atomic_int_t; // same size as cf_atomic_int, but not atomic
#define HAS_ATOMIC_32 1
#endif

#ifdef MARCH_x86_64
typedef volatile uint64_t cf_atomic64;
typedef volatile uint32_t cf_atomic32;
typedef volatile uint64_t cf_atomic_p;
typedef volatile uint64_t cf_atomic_int;
#define SIZEOF_ATOMIC_INT 8
typedef uint64_t cf_atomic_int_t; // same size as cf_atomic_int, but not atomic
#define HAS_ATOMIC_32 1
#define HAS_ATOMIC_64 1
#endif

/*
 * cf_atomicX_add
 * Atomic addition: add a value b to an atomic integer a, and return the result
 *
 * cf_atomicX_cas
 * Compare-and-swap: test a value b against an atomic integer a; if they
 * are equal, store the value x into a. The initial value of a is returned
 * either way, so success can be checked by comparing the returned value
 * against b
 * NB: this is a strong memory barrier
 *
 * cf_atomicX_fas
 * Fetch-and-swap: atomically store the value of b into a, and return the
 * prior value of a
 *
 * cf_atomicX_addunless
 * Increment-unless: test a value b against an atomic integer a. If they
 * are NOT equal, add x to a and return non-zero; if they ARE equal, return
 * zero
 *
 * (See the usage sketch after the 64-bit section below.)
 */

/* cf_atomic64_get, cf_atomic64_set
 * Read and write an atomic value */
#ifdef HAS_ATOMIC_64

#define cf_atomic64_get(a) (a)
#define cf_atomic64_set(a, b) (*(a) = (b))

static inline int64_t
cf_atomic64_add(cf_atomic64 *a, int64_t b)
{
    int64_t i = b;

#ifndef CF_WINDOWS
    __asm__ __volatile__ ("lock; xaddq %0, %1" : "+r" (b), "+m" (*a) : : "memory");
#else
    b = _InterlockedExchangeAdd64((LONGLONG *)a, b);
#endif

    return(b + i);
}
#define cf_atomic64_sub(a,b) (cf_atomic64_add((a), (0 - (b))))
#define cf_atomic64_incr(a) (cf_atomic64_add((a), 1))
#define cf_atomic64_decr(a) (cf_atomic64_add((a), -1))

#ifndef CF_WINDOWS
// The following section is not used by the cl_libevent2 client, so it has
// not been ported to the libevent2 Windows client. This also helps identify
// sections that are no longer in use. If you need these operations on
// Windows, be aware that they have not been ported yet.

/* cf_atomic64_cas
 * Compare-and-swap: if a holds b, store x into a; return the value a held
 * prior to the operation */
static inline int64_t
cf_atomic64_cas(cf_atomic64 *a, int64_t b, int64_t x)
{
    int64_t p;

    __asm__ __volatile__ ("lock; cmpxchgq %1,%2" : "=a"(p) : "q"(x), "m"(*(a)), "0"(b) : "memory");

    return(p);
}

#define cf_atomic64_cas_m(_a, _b, _x) \
({ __typeof__(_b) __b = _b; \
    __asm__ __volatile__ ("lock; cmpxchgq %1,%2" : "=a"(__b) : "q"(_x), "m"(*(_a)), "0"(_b) : "memory"); \
    __b; \
})

/* cf_atomic64_fas
 * Fetch-and-swap: atomically store the value of *b into *a, and return the
 * prior value of *a */
static inline int64_t
cf_atomic64_fas(cf_atomic64 *a, cf_atomic64 *b)
{
    int64_t p;

    __asm__ __volatile__ ("lock; xchgq %0,%1" : "=r"(p) : "m"(*(a)), "0"(*(b)) : "memory");

    return(p);
}

#define cf_atomic64_fas_m(_a, _b) \
({ __typeof__(_b) __b; \
    __asm__ __volatile__ ("lock; xchgq %0,%1" : "=r"(__b) : "m"(*(_a)), "0"(_b) : "memory"); \
    __b; \
})

static inline int64_t
cf_atomic64_addunless(cf_atomic64 *a, int64_t b, int64_t x)
{
    int64_t prior, cur;

    /* Get the current value of the atomic integer */
    cur = cf_atomic64_get(*a);

    for ( ;; ) {
        /* Check if the current value is equal to the criterion */
        if (cur == b)
            break;

        /* Attempt a compare-and-swap, which will return the value of cur
         * prior to the operation */
        prior = cf_atomic64_cas(a, cur, cur + x);

        /* If prior and cur are equal, then the operation has succeeded;
         * otherwise, set cur to prior and go around again */
        if (prior == cur)
            break;
        cur = prior;
    }

    return(cur != b);
}

#endif // !CF_WINDOWS

#endif // HAS_ATOMIC_64
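
/* Usage sketch (illustrative only, not part of the original header): a
 * counter built on the 64-bit operations above. cf_atomic64_cas returns the
 * value held prior to the operation, so success is detected by comparing
 * that return value against the expected value. */
#if defined(HAS_ATOMIC_64) && !defined(CF_WINDOWS)
static inline void
cf_atomic64_usage_sketch(void)
{
    cf_atomic64 counter;
    cf_atomic64_set(&counter, 0);

    cf_atomic64_incr(&counter);       // counter is now 1
    cf_atomic64_add(&counter, 41);    // counter is now 42

    /* Reset to 0 only if the counter still holds 42. */
    int64_t expected = 42;
    int64_t prior = cf_atomic64_cas(&counter, expected, 0);

    if (prior == expected) {
        /* The swap took effect; counter is now 0. */
    }
}
#endif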

#ifdef HAS_ATOMIC_32

#define cf_atomic32_get(a) (a)
#define cf_atomic32_set(a, b) (*(a) = (b))

static inline int32_t
cf_atomic32_add(cf_atomic32 *a, int32_t b)
{
    int32_t i = b;

#ifndef CF_WINDOWS
    __asm__ __volatile__ ("lock; xadd %0, %1" : "+r" (b), "+m" (*a) : : "memory");
#else
    b = _InterlockedExchangeAdd((volatile long *)a, b);
#endif

    return(b + i);
}
#define cf_atomic32_sub(a,b) (cf_atomic32_add((a), (0 - (b))))
#define cf_atomic32_incr(a) (cf_atomic32_add((a), 1))
#define cf_atomic32_decr(a) (cf_atomic32_add((a), -1))

#ifndef CF_WINDOWS
// The following section is not used by the cl_libevent2 client, so it has
// not been ported to the libevent2 Windows client. This also helps identify
// sections that are no longer in use. If you need these operations on
// Windows, be aware that they have not been ported yet.

/* cf_atomic32_cas
 * Compare-and-swap: if a holds b, store x into a; return the value a held
 * prior to the operation */
static inline int32_t
cf_atomic32_cas(cf_atomic32 *a, int32_t b, int32_t x)
{
    int32_t p;

    __asm__ __volatile__ ("lock; cmpxchg %1,%2" : "=a"(p) : "q"(x), "m"(*(a)), "0"(b) : "memory");

    return(p);
}

#define cf_atomic32_cas_m(_a, _b, _x) \
({ __typeof__(_b) __b = _b; \
    __asm__ __volatile__ ("lock; cmpxchg %1,%2" : "=a"(__b) : "q"(_x), "m"(*(_a)), "0"(_b) : "memory"); \
    __b; \
})

/* cf_atomic32_fas
 * Fetch-and-swap: atomically store the value of *b into *a, and return the
 * prior value of *a */
static inline int32_t
cf_atomic32_fas(cf_atomic32 *a, cf_atomic32 *b)
{
    int32_t p;

    __asm__ __volatile__ ("lock; xchg %0,%1" : "=r"(p) : "m"(*(a)), "0"(*(b)) : "memory");

    return(p);
}

#define cf_atomic32_fas_m(_a, _b) \
({ __typeof__(_b) __b; \
    __asm__ __volatile__ ("lock; xchg %0,%1" : "=r"(__b) : "m"(*(_a)), "0"(_b) : "memory"); \
    __b; \
})

static inline int32_t
cf_atomic32_addunless(cf_atomic32 *a, int32_t b, int32_t x)
{
    int32_t prior, cur;

    /* Get the current value of the atomic integer */
    cur = cf_atomic32_get(*a);

    for ( ;; ) {
        /* Check if the current value is equal to the criterion */
        if (cur == b)
            break;

        /* Attempt a compare-and-swap, which will return the value of cur
         * prior to the operation */
        prior = cf_atomic32_cas(a, cur, cur + x);

        /* If prior and cur are equal, then the operation has succeeded;
         * otherwise, set cur to prior and go around again */
        if (prior == cur)
            break;
        cur = prior;
    }

    return(cur != b);
}

#endif // !CF_WINDOWS

#endif // HAS_ATOMIC_32
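
/* Illustrative sketch (hypothetical helper, not part of the original API):
 * atomically raise *a to b if b is larger, using the same CAS retry pattern
 * as cf_atomic32_addunless above. */
#if defined(HAS_ATOMIC_32) && !defined(CF_WINDOWS)
static inline void
cf_atomic32_setmax_sketch(cf_atomic32 *a, int32_t b)
{
    int32_t cur = cf_atomic32_get(*a);

    while (cur < b) {
        /* If *a still holds cur, store b; otherwise learn the new value. */
        int32_t prior = cf_atomic32_cas(a, cur, b);

        if (prior == cur)
            break;        /* the swap succeeded */

        cur = prior;      /* lost a race; retry against the latest value */
    }
}
#endif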

#ifdef MARCH_i686

#define cf_atomic_p_get(_a) cf_atomic32_get(_a)
#define cf_atomic_p_set(_a, _b) cf_atomic32_set(_a, _b)
#define cf_atomic_p_add(_a, _b) cf_atomic32_add(_a, _b)
#define cf_atomic_p_incr(_a) cf_atomic32_add((_a), 1)
#define cf_atomic_p_decr(_a) cf_atomic32_add((_a), -1)

#ifndef CF_WINDOWS
#define cf_atomic_p_cas(_a, _b, _x) cf_atomic32_cas(_a, _b, _x)
#define cf_atomic_p_cas_m(_a, _b, _x) cf_atomic32_cas_m(_a, _b, _x)
#define cf_atomic_p_fas(_a, _b) cf_atomic32_fas(_a, _b)
#define cf_atomic_p_fas_m(_a, _b) cf_atomic32_fas_m(_a, _b)
#define cf_atomic_p_addunless(_a, _b, _x) cf_atomic32_addunless(_a, _b, _x)
#endif // !CF_WINDOWS

#define cf_atomic_int_get(_a) cf_atomic32_get(_a)
#define cf_atomic_int_set(_a, _b) cf_atomic32_set(_a, _b)
#define cf_atomic_int_add(_a, _b) cf_atomic32_add(_a, _b)
#define cf_atomic_int_sub(_a, _b) cf_atomic32_sub(_a, _b)
#define cf_atomic_int_incr(_a) cf_atomic32_add((_a), 1)
#define cf_atomic_int_decr(_a) cf_atomic32_add((_a), -1)

#ifndef CF_WINDOWS
#define cf_atomic_int_cas(_a, _b, _x) cf_atomic32_cas(_a, _b, _x)
#define cf_atomic_int_cas_m(_a, _b, _x) cf_atomic32_cas_m(_a, _b, _x)
#define cf_atomic_int_fas(_a, _b) cf_atomic32_fas(_a, _b)
#define cf_atomic_int_fas_m(_a, _b) cf_atomic32_fas_m(_a, _b)
#define cf_atomic_int_addunless(_a, _b, _x) cf_atomic32_addunless(_a, _b, _x)
#endif // !CF_WINDOWS

#endif // MARCH_i686

#ifdef MARCH_x86_64

#define cf_atomic_p_get(_a) cf_atomic64_get(_a)
#define cf_atomic_p_set(_a, _b) cf_atomic64_set(_a, _b)
#define cf_atomic_p_add(_a, _b) cf_atomic64_add(_a, _b)
#define cf_atomic_p_incr(_a) cf_atomic64_add((_a), 1)
#define cf_atomic_p_decr(_a) cf_atomic64_add((_a), -1)

#ifndef CF_WINDOWS
#define cf_atomic_p_cas(_a, _b, _x) cf_atomic64_cas(_a, _b, _x)
#define cf_atomic_p_cas_m(_a, _b, _x) cf_atomic64_cas_m(_a, _b, _x)
#define cf_atomic_p_fas(_a, _b) cf_atomic64_fas(_a, _b)
#define cf_atomic_p_fas_m(_a, _b) cf_atomic64_fas_m(_a, _b)
#define cf_atomic_p_addunless(_a, _b, _x) cf_atomic64_addunless(_a, _b, _x)
#endif // !CF_WINDOWS

#define cf_atomic_int_get(_a) cf_atomic64_get(_a)
#define cf_atomic_int_set(_a, _b) cf_atomic64_set(_a, _b)
#define cf_atomic_int_add(_a, _b) cf_atomic64_add(_a, _b)
#define cf_atomic_int_sub(_a, _b) cf_atomic64_sub(_a, _b)
#define cf_atomic_int_incr(_a) cf_atomic64_add((_a), 1)
#define cf_atomic_int_decr(_a) cf_atomic64_add((_a), -1)

#ifndef CF_WINDOWS
#define cf_atomic_int_cas(_a, _b, _x) cf_atomic64_cas(_a, _b, _x)
#define cf_atomic_int_cas_m(_a, _b, _x) cf_atomic64_cas_m(_a, _b, _x)
#define cf_atomic_int_fas(_a, _b) cf_atomic64_fas(_a, _b)
#define cf_atomic_int_fas_m(_a, _b) cf_atomic64_fas_m(_a, _b)
#define cf_atomic_int_addunless(_a, _b, _x) cf_atomic64_addunless(_a, _b, _x)
#endif // !CF_WINDOWS

#endif // MARCH_x86_64
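
/* Illustrative sketch (hypothetical names): cf_atomic_int compiles to the
 * native word size, so the same counter code builds unchanged on both
 * MARCH_i686 and MARCH_x86_64. */
static cf_atomic_int g_example_op_count = 0;

static inline void
cf_example_count_op(void)
{
    cf_atomic_int_incr(&g_example_op_count);
}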

/* Memory barriers */

// Implementation notes adapted from Linux's atomic_ops.h, for x86_64.

#ifndef CF_WINDOWS
#define smb_mb() asm volatile("mfence":::"memory")
#else
// NB: _ReadWriteBarrier() is a compiler barrier only, not a hardware fence.
#define smb_mb() _ReadWriteBarrier()
#endif

#ifndef CF_WINDOWS

/* CF_MEMORY_BARRIER
 * Full barrier: all preceding memory accesses must commit before any
 * following accesses */
#define CF_MEMORY_BARRIER() __asm__ __volatile__ ("lock; addl $0,0(%%esp)" : : : "memory")

/* CF_MEMORY_BARRIER_READ
 * All preceding loads must commit before any following loads */
#define CF_MEMORY_BARRIER_READ() CF_MEMORY_BARRIER()

/* CF_MEMORY_BARRIER_WRITE
 * All preceding stores must commit before any following stores; x86 does
 * not reorder ordinary stores, so a compiler barrier suffices */
#define CF_MEMORY_BARRIER_WRITE() __asm__ __volatile__ ("" : : : "memory")

#endif // !CF_WINDOWS
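
/* Usage sketch (illustrative, hypothetical g_payload/g_ready names):
 * publishing data to another thread with an explicit write/read barrier
 * pair, assuming the non-Windows barrier macros above. */
#ifndef CF_WINDOWS
static int g_payload;
static cf_atomic_int g_ready = 0;

static inline void
cf_example_publish(int value)
{
    g_payload = value;              /* write the data first ...          */
    CF_MEMORY_BARRIER_WRITE();      /* ... ensure the store commits ...  */
    cf_atomic_int_set(&g_ready, 1); /* ... before raising the ready flag */
}

static inline int
cf_example_consume(int *out)
{
    if (cf_atomic_int_get(g_ready) == 0)
        return 0;                   /* nothing published yet */

    CF_MEMORY_BARRIER_READ();       /* order the flag read before the data read */
    *out = g_payload;
    return 1;
}
#endif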