All Data Structures Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Groups Pages
as_atomic_win.h
Go to the documentation of this file.
1 /*
2  * Copyright 2008-2023 Aerospike, Inc.
3  *
4  * Portions may be licensed to Aerospike, Inc. under one or more contributor
5  * license agreements.
6  *
7  * Licensed under the Apache License, Version 2.0 (the "License"); you may not
8  * use this file except in compliance with the License. You may obtain a copy of
9  * the License at http://www.apache.org/licenses/LICENSE-2.0
10  *
11  * Unless required by applicable law or agreed to in writing, software
12  * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
13  * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
14  * License for the specific language governing permissions and limitations under
15  * the License.
16  */
17 #pragma once
18 
19 // Atomics for Windows
20 #include <aerospike/as_std.h>
21 
22 #if defined(WIN32_LEAN_AND_MEAN)
23 #include <windows.h>
24 #else
25 #define WIN32_LEAN_AND_MEAN
26 #include <windows.h>
27 #undef WIN32_LEAN_AND_MEAN
28 #endif
29 #include <intrin.h>
30 
31 #ifdef __cplusplus
32 extern "C" {
33 #endif
34 
35 // We assume for now that our Windows clients will be run only on x86. The
36 // wrappers with "acquire" and "release" barrier semantics do not actually
37 // enforce the barriers - the wrappers are there to enable compilation only.
38 
39 // TODO - if we find we need the Windows client to run on ARM, revisit the
40 // acquire and release wrapped methods and implement them correctly.
41 
42 /******************************************************************************
43  * LOAD
44  *****************************************************************************/
45 
// Plain volatile loads. These rely on the assumption (stated at the top of
// this file) that clients run on x86, where an aligned load of up to 8 bytes
// is a single instruction; volatile keeps the compiler from caching, folding
// or reordering the read.

// void* as_load_ptr(const void** target)
#define as_load_ptr(_target) (*(void volatile**)(_target))

// uint64_t as_load_uint64(const uint64_t* target)
#define as_load_uint64(_target) (*(uint64_t volatile*)(_target))

// int64_t as_load_int64(const int64_t* target)
#define as_load_int64(_target) (*(int64_t volatile*)(_target))

// uint32_t as_load_uint32(const uint32_t* target)
#define as_load_uint32(_target) (*(uint32_t volatile*)(_target))

// int32_t as_load_int32(const int32_t* target)
#define as_load_int32(_target) (*(int32_t volatile*)(_target))

// uint16_t as_load_uint16(const uint16_t* target)
#define as_load_uint16(_target) (*(uint16_t volatile*)(_target))

// int16_t as_load_int16(const int16_t* target)
#define as_load_int16(_target) (*(int16_t volatile*)(_target))

// uint8_t as_load_uint8(const uint8_t* target)
#define as_load_uint8(_target) (*(uint8_t volatile*)(_target))

// int8_t as_load_int8(const int8_t* target)
#define as_load_int8(_target) (*(int8_t volatile*)(_target))

// Acquire ("_acq") variants - no acquire barrier is actually emitted; they are
// identical to the plain loads and exist only so portable code compiles on
// Windows/x86 (see note at top of file).

// void* as_load_ptr_acq(const void** target)
#define as_load_ptr_acq(_target) (*(void volatile**)(_target))

// uint64_t as_load_uint64_acq(const uint64_t* target)
#define as_load_uint64_acq(_target) (*(uint64_t volatile*)(_target))

// int64_t as_load_int64_acq(const int64_t* target)
#define as_load_int64_acq(_target) (*(int64_t volatile*)(_target))

// uint32_t as_load_uint32_acq(const uint32_t* target)
#define as_load_uint32_acq(_target) (*(uint32_t volatile*)(_target))

// int32_t as_load_int32_acq(const int32_t* target)
#define as_load_int32_acq(_target) (*(int32_t volatile*)(_target))

// uint16_t as_load_uint16_acq(const uint16_t* target)
#define as_load_uint16_acq(_target) (*(uint16_t volatile*)(_target))

// int16_t as_load_int16_acq(const int16_t* target)
#define as_load_int16_acq(_target) (*(int16_t volatile*)(_target))

// uint8_t as_load_uint8_acq(const uint8_t* target)
#define as_load_uint8_acq(_target) (*(uint8_t volatile*)(_target))

// int8_t as_load_int8_acq(const int8_t* target)
#define as_load_int8_acq(_target) (*(int8_t volatile*)(_target))
101 
102 /******************************************************************************
103  * STORE
104  *****************************************************************************/
105 
// Plain volatile stores - same x86 alignment/atomicity assumption as the
// loads above. The `_value` argument is parenthesized so expressions with
// low-precedence operators expand safely.

// void as_store_ptr(void** target, void* value)
#define as_store_ptr(_target, _value) *(void volatile**)(_target) = (_value)

// void as_store_uint64(uint64_t* target, uint64_t value)
#define as_store_uint64(_target, _value) *(uint64_t volatile*)(_target) = (_value)

// void as_store_int64(int64_t* target, int64_t value)
#define as_store_int64(_target, _value) *(int64_t volatile*)(_target) = (_value)

// void as_store_uint32(uint32_t* target, uint32_t value)
#define as_store_uint32(_target, _value) *(uint32_t volatile*)(_target) = (_value)

// void as_store_int32(int32_t* target, int32_t value)
#define as_store_int32(_target, _value) *(int32_t volatile*)(_target) = (_value)

// void as_store_uint16(uint16_t* target, uint16_t value)
#define as_store_uint16(_target, _value) *(uint16_t volatile*)(_target) = (_value)

// void as_store_int16(int16_t* target, int16_t value)
#define as_store_int16(_target, _value) *(int16_t volatile*)(_target) = (_value)

// void as_store_uint8(uint8_t* target, uint8_t value)
#define as_store_uint8(_target, _value) *(uint8_t volatile*)(_target) = (_value)

// void as_store_int8(int8_t* target, int8_t value)
#define as_store_int8(_target, _value) *(int8_t volatile*)(_target) = (_value)

// Release ("_rls") variants - no release barrier is actually emitted; they
// exist only to enable compilation on Windows/x86 (see note at top of file).

// void as_store_ptr_rls(void** target, void* value)
#define as_store_ptr_rls(_target, _value) *(void volatile**)(_target) = (_value)

// void as_store_uint64_rls(uint64_t* target, uint64_t value)
#define as_store_uint64_rls(_target, _value) *(uint64_t volatile*)(_target) = (_value)

// void as_store_int64_rls(int64_t* target, int64_t value)
#define as_store_int64_rls(_target, _value) *(int64_t volatile*)(_target) = (_value)

// void as_store_uint32_rls(uint32_t* target, uint32_t value)
#define as_store_uint32_rls(_target, _value) *(uint32_t volatile*)(_target) = (_value)

// void as_store_int32_rls(int32_t* target, int32_t value)
#define as_store_int32_rls(_target, _value) *(int32_t volatile*)(_target) = (_value)

// void as_store_uint16_rls(uint16_t* target, uint16_t value)
#define as_store_uint16_rls(_target, _value) *(uint16_t volatile*)(_target) = (_value)

// void as_store_int16_rls(int16_t* target, int16_t value)
#define as_store_int16_rls(_target, _value) *(int16_t volatile*)(_target) = (_value)

// void as_store_uint8_rls(uint8_t* target, uint8_t value)
#define as_store_uint8_rls(_target, _value) *(uint8_t volatile*)(_target) = (_value)

// void as_store_int8_rls(int8_t* target, int8_t value)
#define as_store_int8_rls(_target, _value) *(int8_t volatile*)(_target) = (_value)
161 
162 /******************************************************************************
163  * FETCH AND ADD
164  *****************************************************************************/
165 
// Fetch-and-add: atomically add `value` and return the value held BEFORE the
// add (InterlockedExchangeAdd* return the prior value). The `_value` argument
// is parenthesized and cast explicitly, matching the FAS/CAS sections below.

// uint64_t as_faa_uint64(uint64_t* target, int64_t value)
#define as_faa_uint64(_target, _value) (uint64_t)InterlockedExchangeAdd64((LONGLONG volatile*)(_target), (LONGLONG)(_value))

// int64_t as_faa_int64(int64_t* target, int64_t value)
#define as_faa_int64(_target, _value) InterlockedExchangeAdd64((LONGLONG volatile*)(_target), (LONGLONG)(_value))

// uint32_t as_faa_uint32(uint32_t* target, int32_t value)
#define as_faa_uint32(_target, _value) (uint32_t)InterlockedExchangeAdd((LONG volatile*)(_target), (LONG)(_value))

// int32_t as_faa_int32(int32_t* target, int32_t value)
#define as_faa_int32(_target, _value) InterlockedExchangeAdd((LONG volatile*)(_target), (LONG)(_value))

// Windows does not support 16 bit atomic fetch/add.
// uint16_t as_faa_uint16(uint16_t* target, int16_t value)
// int16_t as_faa_int16(int16_t* target, int16_t value)
181 
182 /******************************************************************************
183  * ADD AND FETCH
184  *****************************************************************************/
185 
// Add-and-fetch: atomically add `value` and return the NEW value
// (InterlockedAdd* return the result of the addition). The `_value` argument
// is parenthesized and cast explicitly, matching the FAS/CAS sections below.

// uint64_t as_aaf_uint64(uint64_t* target, int64_t value)
#define as_aaf_uint64(_target, _value) (uint64_t)InterlockedAdd64((LONGLONG volatile*)(_target), (LONGLONG)(_value))

// int64_t as_aaf_int64(int64_t* target, int64_t value)
#define as_aaf_int64(_target, _value) InterlockedAdd64((LONGLONG volatile*)(_target), (LONGLONG)(_value))

// uint32_t as_aaf_uint32(uint32_t* target, int32_t value)
#define as_aaf_uint32(_target, _value) (uint32_t)InterlockedAdd((LONG volatile*)(_target), (LONG)(_value))

// int32_t as_aaf_int32(int32_t* target, int32_t value)
#define as_aaf_int32(_target, _value) InterlockedAdd((LONG volatile*)(_target), (LONG)(_value))

// Release ("_rls") variants - no release barrier is actually emitted; they
// exist only to enable compilation on Windows/x86 (see note at top of file).

// uint64_t as_aaf_uint64_rls(uint64_t* target, int64_t value)
#define as_aaf_uint64_rls(_target, _value) (uint64_t)InterlockedAdd64((LONGLONG volatile*)(_target), (LONGLONG)(_value))

// int64_t as_aaf_int64_rls(int64_t* target, int64_t value)
#define as_aaf_int64_rls(_target, _value) InterlockedAdd64((LONGLONG volatile*)(_target), (LONGLONG)(_value))

// uint32_t as_aaf_uint32_rls(uint32_t* target, int32_t value)
#define as_aaf_uint32_rls(_target, _value) (uint32_t)InterlockedAdd((LONG volatile*)(_target), (LONG)(_value))

// int32_t as_aaf_int32_rls(int32_t* target, int32_t value)
#define as_aaf_int32_rls(_target, _value) InterlockedAdd((LONG volatile*)(_target), (LONG)(_value))

// Windows does not support 16 bit atomic add/fetch.
// uint16_t as_aaf_uint16(uint16_t* target, int16_t value)
// int16_t as_aaf_int16(int16_t* target, int16_t value)
// uint16_t as_aaf_uint16_rls(uint16_t* target, int16_t value)
// int16_t as_aaf_int16_rls(int16_t* target, int16_t value)
217 
218 /******************************************************************************
219  * ADD
220  *****************************************************************************/
221 
// Atomic add for side effect only - the return value of the Interlocked call
// is discarded by callers (documented signatures return void). The `_value`
// argument is parenthesized and cast explicitly, matching the sections below.

// void as_add_uint64(uint64_t* target, int64_t value)
#define as_add_uint64(_target, _value) InterlockedExchangeAdd64((LONGLONG volatile*)(_target), (LONGLONG)(_value))

// void as_add_int64(int64_t* target, int64_t value)
#define as_add_int64(_target, _value) InterlockedExchangeAdd64((LONGLONG volatile*)(_target), (LONGLONG)(_value))

// void as_add_uint32(uint32_t* target, int32_t value)
#define as_add_uint32(_target, _value) InterlockedExchangeAdd((LONG volatile*)(_target), (LONG)(_value))

// void as_add_int32(int32_t* target, int32_t value)
#define as_add_int32(_target, _value) InterlockedExchangeAdd((LONG volatile*)(_target), (LONG)(_value))

// Windows does not support 16 bit atomic add.
// void as_add_uint16(uint16_t* target, int16_t value)
// void as_add_int16(int16_t* target, int16_t value)
237 
238 /******************************************************************************
239  * INCREMENT
240  *****************************************************************************/
241 
// Atomic increment for side effect only - the incremented value returned by
// the Interlocked call is discarded by callers (signatures return void).

// void as_incr_uint64(uint64_t* target)
#define as_incr_uint64(_target) InterlockedIncrement64((LONGLONG volatile*)(_target))

// void as_incr_int64(int64_t* target)
#define as_incr_int64(_target) InterlockedIncrement64((LONGLONG volatile*)(_target))

// void as_incr_uint32(uint32_t* target)
#define as_incr_uint32(_target) InterlockedIncrement((LONG volatile*)(_target))

// void as_incr_int32(int32_t* target)
#define as_incr_int32(_target) InterlockedIncrement((LONG volatile*)(_target))

// void as_incr_uint16(uint16_t* target)
#define as_incr_uint16(_target) InterlockedIncrement16((short volatile*)(_target))

// void as_incr_int16(int16_t* target)
#define as_incr_int16(_target) InterlockedIncrement16((short volatile*)(_target))

// Release ("_rls") variants - no release barrier is actually emitted; they
// exist only to enable compilation on Windows/x86 (see note at top of file).

// void as_incr_uint64_rls(uint64_t* target)
#define as_incr_uint64_rls(_target) InterlockedIncrement64((LONGLONG volatile*)(_target))

// void as_incr_int64_rls(int64_t* target)
#define as_incr_int64_rls(_target) InterlockedIncrement64((LONGLONG volatile*)(_target))

// void as_incr_uint32_rls(uint32_t* target)
#define as_incr_uint32_rls(_target) InterlockedIncrement((LONG volatile*)(_target))

// void as_incr_int32_rls(int32_t* target)
#define as_incr_int32_rls(_target) InterlockedIncrement((LONG volatile*)(_target))

// void as_incr_uint16_rls(uint16_t* target)
#define as_incr_uint16_rls(_target) InterlockedIncrement16((short volatile*)(_target))

// void as_incr_int16_rls(int16_t* target)
#define as_incr_int16_rls(_target) InterlockedIncrement16((short volatile*)(_target))
279 
280 /******************************************************************************
281  * DECREMENT
282  *****************************************************************************/
283 
// Atomic decrement for side effect only - the decremented value returned by
// the Interlocked call is discarded by callers (signatures return void).

// void as_decr_uint64(uint64_t* target)
#define as_decr_uint64(_target) InterlockedDecrement64((LONGLONG volatile*)(_target))

// void as_decr_int64(int64_t* target)
#define as_decr_int64(_target) InterlockedDecrement64((LONGLONG volatile*)(_target))

// void as_decr_uint32(uint32_t* target)
#define as_decr_uint32(_target) InterlockedDecrement((LONG volatile*)(_target))

// void as_decr_int32(int32_t* target)
#define as_decr_int32(_target) InterlockedDecrement((LONG volatile*)(_target))

// void as_decr_uint16(uint16_t* target)
#define as_decr_uint16(_target) InterlockedDecrement16((short volatile*)(_target))

// void as_decr_int16(int16_t* target)
#define as_decr_int16(_target) InterlockedDecrement16((short volatile*)(_target))

// Release ("_rls") variants - no release barrier is actually emitted; they
// exist only to enable compilation on Windows/x86 (see note at top of file).

// void as_decr_uint64_rls(uint64_t* target)
#define as_decr_uint64_rls(_target) InterlockedDecrement64((LONGLONG volatile*)(_target))

// void as_decr_int64_rls(int64_t* target)
#define as_decr_int64_rls(_target) InterlockedDecrement64((LONGLONG volatile*)(_target))

// void as_decr_uint32_rls(uint32_t* target)
#define as_decr_uint32_rls(_target) InterlockedDecrement((LONG volatile*)(_target))

// void as_decr_int32_rls(int32_t* target)
#define as_decr_int32_rls(_target) InterlockedDecrement((LONG volatile*)(_target))

// void as_decr_uint16_rls(uint16_t* target)
#define as_decr_uint16_rls(_target) InterlockedDecrement16((short volatile*)(_target))

// void as_decr_int16_rls(int16_t* target)
#define as_decr_int16_rls(_target) InterlockedDecrement16((short volatile*)(_target))
321 
322 /******************************************************************************
323  * FETCH AND SWAP
324  *****************************************************************************/
325 
// Fetch-and-swap: atomically store `value` and return the prior value.
// Fix: the 16-bit variants previously cast the value to LONG, but
// InterlockedExchange16 takes a SHORT - cast to short to match the parameter
// type (avoids implicit-narrowing conversion warnings).

// uint64_t as_fas_uint64(uint64_t* target, uint64_t value)
#define as_fas_uint64(_target, _value) (uint64_t)InterlockedExchange64((LONGLONG volatile*)(_target), (LONGLONG)(_value))

// int64_t as_fas_int64(int64_t* target, int64_t value)
#define as_fas_int64(_target, _value) InterlockedExchange64((LONGLONG volatile*)(_target), (LONGLONG)(_value))

// uint32_t as_fas_uint32(uint32_t* target, uint32_t value)
#define as_fas_uint32(_target, _value) (uint32_t)InterlockedExchange((LONG volatile*)(_target), (LONG)(_value))

// int32_t as_fas_int32(int32_t* target, int32_t value)
#define as_fas_int32(_target, _value) InterlockedExchange((LONG volatile*)(_target), (LONG)(_value))

// uint16_t as_fas_uint16(uint16_t* target, uint16_t value)
#define as_fas_uint16(_target, _value) (uint16_t)InterlockedExchange16((short volatile*)(_target), (short)(_value))

// int16_t as_fas_int16(int16_t* target, int16_t value)
#define as_fas_int16(_target, _value) InterlockedExchange16((short volatile*)(_target), (short)(_value))
343 
344 /******************************************************************************
345  * COMPARE AND SWAP
346  *****************************************************************************/
347 
// Compare-and-swap: each wrapper returns true if the swap happened (the value
// found in *target equaled old_value), false otherwise. The 8-bit variants
// call the compiler intrinsic (leading underscore, from <intrin.h>) directly.

// bool as_cas_uint64(uint64_t* target, uint64_t old_value, uint64_t new_value)
#define as_cas_uint64(_target, _old_value, _new_value) (InterlockedCompareExchange64((LONGLONG volatile*)(_target), (LONGLONG)(_new_value), (LONGLONG)(_old_value)) == (LONGLONG)(_old_value))

// bool as_cas_int64(int64_t* target, int64_t old_value, int64_t new_value)
#define as_cas_int64(_target, _old_value, _new_value) (InterlockedCompareExchange64((LONGLONG volatile*)(_target), (LONGLONG)(_new_value), (LONGLONG)(_old_value)) == (LONGLONG)(_old_value))

// bool as_cas_uint32(uint32_t* target, uint32_t old_value, uint32_t new_value)
#define as_cas_uint32(_target, _old_value, _new_value) (InterlockedCompareExchange((LONG volatile*)(_target), (LONG)(_new_value), (LONG)(_old_value)) == (LONG)(_old_value))

// bool as_cas_int32(int32_t* target, int32_t old_value, int32_t new_value)
#define as_cas_int32(_target, _old_value, _new_value) (InterlockedCompareExchange((LONG volatile*)(_target), (LONG)(_new_value), (LONG)(_old_value)) == (LONG)(_old_value))

// bool as_cas_uint16(uint16_t* target, uint16_t old_value, uint16_t new_value)
#define as_cas_uint16(_target, _old_value, _new_value) (InterlockedCompareExchange16((short volatile*)(_target), (short)(_new_value), (short)(_old_value)) == (short)(_old_value))

// bool as_cas_int16(int16_t* target, int16_t old_value, int16_t new_value)
#define as_cas_int16(_target, _old_value, _new_value) (InterlockedCompareExchange16((short volatile*)(_target), (short)(_new_value), (short)(_old_value)) == (short)(_old_value))

// bool as_cas_uint8(uint8_t* target, uint8_t old_value, uint8_t new_value)
#define as_cas_uint8(_target, _old_value, _new_value) (_InterlockedCompareExchange8((char volatile*)(_target), (char)(_new_value), (char)(_old_value)) == (char)(_old_value))

// bool as_cas_int8(int8_t* target, int8_t old_value, int8_t new_value)
#define as_cas_int8(_target, _old_value, _new_value) (_InterlockedCompareExchange8((char volatile*)(_target), (char)(_new_value), (char)(_old_value)) == (char)(_old_value))
371 
372 /******************************************************************************
373  * MEMORY FENCE
374  *****************************************************************************/
375 
// The atomic include causes compiler errors in Visual Studio 17. Therefore,
// the new atomic_thread_fence() memory barriers can't be used yet.
// #include <atomic>

// All four fences map to MemoryBarrier, a full barrier - stronger than
// strictly required for the acquire/release/relaxed variants, but correct.

// void as_fence_acq()
#define as_fence_acq MemoryBarrier

// void as_fence_rls()
#define as_fence_rls MemoryBarrier

// void as_fence_rlx()
#define as_fence_rlx MemoryBarrier

// void as_fence_seq()
#define as_fence_seq MemoryBarrier
391 
392 /******************************************************************************
393  * SPIN LOCK
394  *****************************************************************************/
395 
// Simple test-and-set spin lock: 0 = unlocked, 1 = locked.
typedef uint32_t as_spinlock;

#define AS_SPINLOCK_INIT { 0 }
#define as_spinlock_init(_s) *(_s) = 0
#define as_spinlock_destroy(_s) ((void)_s) // no-op
401 
// Acquire the spin lock, busy-waiting (with processor yields) until it is
// free. Test-and-test-and-set: after a failed atomic swap, spin on a plain
// load until the lock looks free, then retry the swap - this avoids hammering
// the cache line with locked instructions.
static inline void
as_spinlock_lock(as_spinlock* lock)
{
	while (as_fas_uint32(lock, 1) == 1) {
		while (as_load_uint32(lock) == 1)
			YieldProcessor();
	}

	// Full barrier so critical-section accesses cannot move above the acquire.
	MemoryBarrier();
}
412 
// Release the spin lock. The full barrier ensures all critical-section writes
// are visible before the lock word is cleared by a plain volatile store.
static inline void
as_spinlock_unlock(as_spinlock* lock)
{
	MemoryBarrier();
	as_store_uint32(lock, 0);
}
419 
420 /******************************************************************************
421  * SPIN WRITER/READERS LOCK
422  *****************************************************************************/
423 
// Single-writer/multiple-readers spin lock. Layout of the 32-bit word:
//   bit 31     - writer holds or is waiting for the lock
//   bit 30     - latch bit (its use is not visible in this file)
//   bits 0-29  - count of readers currently holding the lock
typedef uint32_t as_swlock;

#define AS_SWLOCK_INIT { 0 }
// NOTE(review): unlike as_spinlock_init, this takes the lock word as an
// lvalue, not by pointer (as_swlock_init(lock), not as_swlock_init(&lock)).
// Confirm the asymmetry with the spinlock API is intentional.
#define as_swlock_init(_s) (_s) = 0
#define as_swlock_destroy(_s) ((void)_s) // no-op

// Shift on uint32_t - (1 << 31) on a signed int is undefined behavior in C
// (left-shift into the sign bit). The resulting values are unchanged.
#define AS_SWLOCK_WRITER_BIT (((uint32_t)1) << 31)
#define AS_SWLOCK_LATCH_BIT (((uint32_t)1) << 30)
#define AS_SWLOCK_WRITER_MASK (AS_SWLOCK_LATCH_BIT | AS_SWLOCK_WRITER_BIT)
#define AS_SWLOCK_READER_MASK (UINT32_MAX ^ AS_SWLOCK_WRITER_MASK)
434 
// Acquire the lock for writing. Sets the writer bit so readers that arrive
// afterwards back off, then waits for in-flight readers (count in the low
// bits) to drain.
static inline void
as_swlock_write_lock(as_swlock* lock)
{
	InterlockedOr((LONG volatile*)lock, AS_SWLOCK_WRITER_BIT);

	while ((as_load_uint32(lock) & AS_SWLOCK_READER_MASK) != 0) {
		YieldProcessor();
	}

	// Full barrier so critical-section accesses cannot move above the acquire.
	MemoryBarrier();
}
446 
// Release the write lock. The barrier publishes critical-section writes, then
// both writer-side bits are cleared in a single atomic AND.
static inline void
as_swlock_write_unlock(as_swlock* lock)
{
	MemoryBarrier();
	InterlockedAnd((LONG volatile*)lock, AS_SWLOCK_READER_MASK);
}
453 
// Acquire the lock for reading. Readers optimistically bump the reader count,
// then examine the writer bits captured at the moment of the increment to
// decide whether the slot was actually obtained.
static inline void
as_swlock_read_lock(as_swlock* lock)
{
	while (true) {
		// Cheap read-only wait while a writer holds or wants the lock.
		while ((as_load_uint32(lock) & AS_SWLOCK_WRITER_BIT) != 0) {
			YieldProcessor();
		}

		// Take a reader slot; keep only the writer/latch bits of the prior value.
		uint32_t l = as_faa_uint32(lock, 1) & AS_SWLOCK_WRITER_MASK;

		if (l == 0) {
			// No writer activity - read lock acquired.
			break;
		}

		if (l == AS_SWLOCK_WRITER_BIT) {
			// Lost the race to a writer - undo our increment and retry.
			// NOTE(review): when the latch bit is set the increment is
			// deliberately NOT undone; the latch protocol is defined by
			// code outside this file - confirm against callers.
			as_decr_uint32(lock);
		}
	}

	// Full barrier so reads in the critical section cannot move above the acquire.
	MemoryBarrier();
}
475 
// Release a read lock: barrier, then atomically drop our reader slot.
static inline void
as_swlock_read_unlock(as_swlock* lock)
{
	MemoryBarrier();
	as_decr_uint32(lock);
}
482 
483 /******************************************************************************
484  * SET MAX
485  *****************************************************************************/
486 
487 static inline bool
488 as_setmax_uint64(uint64_t* target, uint64_t x)
489 {
490  uint64_t prior;
491 
492  // Get the current value of the atomic integer.
493  uint64_t cur = as_load_uint64(target);
494 
495  while (x > cur) {
496  // Proposed value is larger than current - attempt compare-and-swap.
497  prior = InterlockedCompareExchange64((LONGLONG volatile*)target, x, cur);
498 
499  if (cur == prior) {
500  // Current value was unchanged, proposed value swapped in.
501  return true;
502  }
503 
504  // Current value had changed, set cur to prior and go around again.
505  cur = prior;
506  }
507 
508  // Proposed value not swapped in as new maximum.
509  return false;
510 }
511 
512 static inline bool
513 as_setmax_uint32(uint32_t* target, uint32_t x)
514 {
515  uint32_t prior;
516 
517  // Get the current value of the atomic integer.
518  uint32_t cur = as_load_uint32(target);
519 
520  while (x > cur) {
521  // Proposed value is larger than current - attempt compare-and-swap.
522  prior = InterlockedCompareExchange((LONG volatile*)target, x, cur);
523 
524  if (cur == prior) {
525  return true;
526  }
527 
528  // Current value had changed, set cur to prior and go around again.
529  cur = prior;
530  }
531 
532  // Proposed value not swapped in as new maximum.
533  return false;
534 }
535 
536 #ifdef __cplusplus
537 } // end extern "C"
538 #endif
#define as_store_uint32(_target, _value)
static void as_swlock_read_lock(as_swlock *lock)
uint32_t as_swlock
static bool as_setmax_uint32(uint32_t *target, uint32_t x)
#define as_decr_uint32(_target)
#define as_fas_uint32(_target, _value)
#define as_load_uint64(_target)
Definition: as_atomic_win.h:50
#define as_faa_uint32(_target, _value)
static void as_swlock_write_unlock(as_swlock *lock)
#define AS_SWLOCK_READER_MASK
static void as_swlock_write_lock(as_swlock *lock)
#define as_load_uint32(_target)
Definition: as_atomic_win.h:56
static void as_spinlock_lock(as_spinlock *lock)
uint32_t as_spinlock
#define AS_SWLOCK_WRITER_BIT
static void as_swlock_read_unlock(as_swlock *lock)
static bool as_setmax_uint64(uint64_t *target, uint64_t x)
#define AS_SWLOCK_WRITER_MASK
static void as_spinlock_unlock(as_spinlock *lock)