1 | /*␊ |
2 | * Copyright (c) 2004-2006 Apple Computer, Inc. All rights reserved.␊ |
3 | *␊ |
4 | * @APPLE_LICENSE_HEADER_START@␊ |
5 | * ␊ |
6 | * This file contains Original Code and/or Modifications of Original Code␊ |
7 | * as defined in and that are subject to the Apple Public Source License␊ |
8 | * Version 2.0 (the 'License'). You may not use this file except in␊ |
9 | * compliance with the License. Please obtain a copy of the License at␊ |
10 | * http://www.opensource.apple.com/apsl/ and read it before using this␊ |
11 | * file.␊ |
12 | * ␊ |
13 | * The Original Code and all software distributed under the License are␊ |
14 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER␊ |
15 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,␊ |
16 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,␊ |
17 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.␊ |
18 | * Please see the License for the specific language governing rights and␊ |
19 | * limitations under the License.␊ |
20 | * ␊ |
21 | * @APPLE_LICENSE_HEADER_END@␊ |
22 | */␊ |
23 | ␊ |
24 | #ifndef _OSATOMIC_H_␊ |
25 | #define _OSATOMIC_H_␊ |
26 | ␊ |
27 | #include <stddef.h>␊ |
28 | #include <sys/cdefs.h>␊ |
29 | #include <stdint.h>␊ |
30 | #include <stdbool.h>␊ |
31 | ␊ |
32 | #include <Availability.h>␊ |
33 | ␊ |
34 | /*! @header␊ |
35 | * These are the preferred versions of the atomic and synchronization operations.␊ |
36 | * Their implementation is customized at boot time for the platform, including␊ |
37 | * late-breaking errata fixes as necessary. They are thread safe.␊ |
38 | *␊ |
39 | * WARNING: all addresses passed to these functions must be "naturally aligned",␊ |
 * i.e. <code>int32_t</code> pointers must be 32-bit aligned (low 2 bits of
41 | * address are zeroes), and <code>int64_t</code> pointers must be 64-bit aligned␊ |
42 | * (low 3 bits of address are zeroes.)␊ |
43 | *␊ |
44 | * Note that some versions of the atomic functions incorporate memory barriers␊ |
45 | * and some do not. Barriers strictly order memory access on weakly-ordered␊ |
46 | * architectures such as PPC. All loads and stores that appear (in sequential␊ |
47 | * program order) before the barrier are guaranteed to complete before any␊ |
48 | * load or store that appears after the barrier.␊ |
49 | *␊ |
50 | * On a uniprocessor system, the barrier operation is typically a no-op. On a␊ |
51 | * multiprocessor system, the barrier can be quite expensive on some platforms,␊ |
52 | * such as PPC.␊ |
53 | *␊ |
54 | * Most code should use the barrier functions to ensure that memory shared between␊ |
55 | * threads is properly synchronized. For example, if you want to initialize␊ |
56 | * a shared data structure and then atomically increment a variable to indicate␊ |
57 | * that the initialization is complete, you must use {@link OSAtomicIncrement32Barrier}␊ |
58 | * to ensure that the stores to your data structure complete before the atomic␊ |
59 | * increment.␊ |
60 | *␊ |
61 | * Likewise, the consumer of that data structure must use {@link OSAtomicDecrement32Barrier},␊ |
62 | * in order to ensure that their loads of the structure are not executed before␊ |
63 | * the atomic decrement. On the other hand, if you are simply incrementing a global␊ |
64 | * counter, then it is safe and potentially faster to use {@link OSAtomicIncrement32}.␊ |
65 | *␊ |
66 | * If you are unsure which version to use, prefer the barrier variants as they are␊ |
67 | * safer.␊ |
68 | *␊ |
69 | * The spinlock and queue operations always incorporate a barrier.␊ |
70 | *␊ |
71 | * For the kernel-space version of this header, see␊ |
72 | * {@link //apple_ref/doc/header/OSAtomic.h OSAtomic.h (Kernel Framework)}␊ |
73 | *␊ |
74 | * @apiuid //apple_ref/doc/header/user_space_OSAtomic.h␊ |
75 | */ ␊ |
76 | __BEGIN_DECLS␊ |
77 | ␊ |
78 | ␊ |
79 | /*! @group Arithmetic functions␊ |
80 | All functions in this group return the new value.␊ |
81 | */␊ |
82 | ␊ |
83 | /*! @abstract Atomically adds two 32-bit values.␊ |
84 | @discussion␊ |
85 | ␉This function adds the value given by <code>__theAmount</code> to the␊ |
86 | ␉value in the memory location referenced by <code>__theValue</code>,␊ |
87 | ␉storing the result back to that memory location atomically.␊ |
88 | @result Returns the new value.␊ |
89 | */␊ |
90 | int32_t␉OSAtomicAdd32( int32_t __theAmount, volatile int32_t *__theValue );␊ |
91 | ␊ |
92 | ␊ |
/*! @abstract Atomically adds two 32-bit values with a barrier.
    @discussion
	This function adds the value given by <code>__theAmount</code> to the
	value in the memory location referenced by <code>__theValue</code>,
	storing the result back to that memory location atomically.

	This function is equivalent to {@link OSAtomicAdd32}
	except that it also introduces a barrier.
    @result Returns the new value.
 */
int32_t	OSAtomicAdd32Barrier( int32_t __theAmount, volatile int32_t *__theValue );
104 | ␊ |
105 | ␊ |
/*! @abstract Atomically increments a 32-bit value.
    @discussion
	This function adds 1 to the value in the memory location referenced
	by <code>__theValue</code>, atomically; it is a convenience wrapper
	around {@link OSAtomicAdd32}.
    @result Returns the new value.
 */
__inline static
int32_t	OSAtomicIncrement32( volatile int32_t *__theValue )
{
	return OSAtomicAdd32( 1, __theValue );
}
111 | ␊ |
112 | ␊ |
/*! @abstract Atomically increments a 32-bit value with a barrier.
    @discussion
	This function is equivalent to {@link OSAtomicIncrement32}
	except that it also introduces a barrier; it is a convenience
	wrapper around {@link OSAtomicAdd32Barrier}.
    @result Returns the new value.
 */
__inline static
int32_t	OSAtomicIncrement32Barrier( volatile int32_t *__theValue )
{
	return OSAtomicAdd32Barrier( 1, __theValue );
}
122 | ␊ |
/*! @abstract Atomically decrements a 32-bit value.
    @discussion
	This function is a convenience wrapper around {@link OSAtomicAdd32}
	with an amount of -1.
    @result Returns the new value.
 */
__inline static
int32_t	OSAtomicDecrement32( volatile int32_t *__theValue )
{ return OSAtomicAdd32( -1, __theValue); }
127 | ␊ |
/*! @abstract Atomically decrements a 32-bit value with a barrier.
    @discussion
	This function is equivalent to {@link OSAtomicDecrement32}
	except that it also introduces a barrier.
    @result Returns the new value.
 */
__inline static
int32_t	OSAtomicDecrement32Barrier( volatile int32_t *__theValue )
{ return OSAtomicAdd32Barrier( -1, __theValue); }
137 | ␊ |
138 | ␊ |
/*! @abstract Atomically adds two 64-bit values.
    @discussion
	This function adds the value given by <code>__theAmount</code> to the
	value in the memory location referenced by <code>__theValue</code>,
	storing the result back to that memory location atomically.
    @result Returns the new value.
 */
int64_t	OSAtomicAdd64( int64_t __theAmount, volatile int64_t *__theValue );
146 | ␊ |
147 | ␊ |
148 | /*! @abstract Atomically adds two 64-bit values with a barrier.␊ |
149 | @discussion␊ |
150 | ␉This function adds the value given by <code>__theAmount</code> to the␊ |
151 | ␉value in the memory location referenced by <code>__theValue</code>,␊ |
152 | ␉storing the result back to that memory location atomically.␊ |
153 | ␊ |
154 | ␉This function is equivalent to {@link OSAtomicAdd64}␊ |
155 | ␉except that it also introduces a barrier.␊ |
156 | @result Returns the new value.␊ |
157 | */␊ |
158 | int64_t␉OSAtomicAdd64Barrier( int64_t __theAmount, volatile int64_t *__theValue ) __OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_3_2); ␊ |
159 | ␊ |
160 | ␊ |
/*! @abstract Atomically increments a 64-bit value.
    @discussion
	This function is a convenience wrapper around {@link OSAtomicAdd64}
	with an amount of 1.
    @result Returns the new value.
 */
__inline static
int64_t	OSAtomicIncrement64( volatile int64_t *__theValue )
{ return OSAtomicAdd64( 1, __theValue); }
165 | ␊ |
/*! @abstract Atomically increments a 64-bit value with a barrier.
    @discussion
	This function is equivalent to {@link OSAtomicIncrement64}
	except that it also introduces a barrier; it is a convenience
	wrapper around {@link OSAtomicAdd64Barrier}.
    @result Returns the new value.
 */
__inline static
int64_t	OSAtomicIncrement64Barrier( volatile int64_t *__theValue )
{
	return OSAtomicAdd64Barrier( 1, __theValue );
}
175 | ␊ |
176 | ␊ |
/*! @abstract Atomically decrements a 64-bit value.
    @discussion
	This function subtracts 1 from the value in the memory location
	referenced by <code>__theValue</code>, atomically; it is a
	convenience wrapper around {@link OSAtomicAdd64}.
    @result Returns the new value.
 */
__inline static
int64_t	OSAtomicDecrement64( volatile int64_t *__theValue )
{ return OSAtomicAdd64( -1, __theValue); }
186 | ␊ |
187 | ␊ |
/*! @abstract Atomically decrements a 64-bit value with a barrier.
    @discussion
	This function is equivalent to {@link OSAtomicDecrement64}
	except that it also introduces a barrier; it is a convenience
	wrapper around {@link OSAtomicAdd64Barrier}.
    @result Returns the new value.
 */
__inline static
int64_t	OSAtomicDecrement64Barrier( volatile int64_t *__theValue )
{
	return OSAtomicAdd64Barrier( -1, __theValue );
}
197 | ␊ |
198 | ␊ |
199 | /*! @group Boolean functions (AND, OR, XOR)␊ |
200 | * ␊ |
201 | * @discussion Functions in this group come in four variants for each operation:␊ |
202 | * with and without barriers, and functions that return the original value or␊ |
203 | * the result value of the operation.␊ |
204 | * ␊ |
 * The "Orig" versions return the original value (before the operation); the non-Orig
206 | * versions return the value after the operation. All are layered on top of␊ |
207 | * {@link OSAtomicCompareAndSwap32} and similar.␊ |
208 | */␊ |
209 | ␊ |
210 | /*! @abstract Atomic bitwise OR of two 32-bit values.␊ |
211 | @discussion␊ |
212 | ␉This function performs the bitwise OR of the value given by <code>__theMask</code>␊ |
213 | ␉with the value in the memory location referenced by <code>__theValue</code>,␊ |
214 | ␉storing the result back to that memory location atomically.␊ |
215 | @result Returns the new value.␊ |
216 | */␊ |
217 | int32_t␉OSAtomicOr32( uint32_t __theMask, volatile uint32_t *__theValue );␊ |
218 | ␊ |
219 | ␊ |
220 | /*! @abstract Atomic bitwise OR of two 32-bit values with barrier.␊ |
221 | @discussion␊ |
222 | ␉This function performs the bitwise OR of the value given by <code>__theMask</code>␊ |
223 | ␉with the value in the memory location referenced by <code>__theValue</code>,␊ |
224 | ␉storing the result back to that memory location atomically.␊ |
225 | ␊ |
226 | ␉This function is equivalent to {@link OSAtomicOr32}␊ |
227 | ␉except that it also introduces a barrier.␊ |
228 | @result Returns the new value.␊ |
229 | */␊ |
230 | int32_t␉OSAtomicOr32Barrier( uint32_t __theMask, volatile uint32_t *__theValue );␊ |
231 | ␊ |
232 | ␊ |
233 | /*! @abstract Atomic bitwise OR of two 32-bit values returning original.␊ |
234 | @discussion␊ |
235 | ␉This function performs the bitwise OR of the value given by <code>__theMask</code>␊ |
236 | ␉with the value in the memory location referenced by <code>__theValue</code>,␊ |
237 | ␉storing the result back to that memory location atomically.␊ |
238 | @result Returns the original value referenced by <code>__theValue</code>.␊ |
239 | */␊ |
240 | int32_t␉OSAtomicOr32Orig( uint32_t __theMask, volatile uint32_t *__theValue ) __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_3_2);␊ |
241 | ␊ |
242 | ␊ |
243 | /*! @abstract Atomic bitwise OR of two 32-bit values returning original with barrier.␊ |
244 | @discussion␊ |
245 | ␉This function performs the bitwise OR of the value given by <code>__theMask</code>␊ |
246 | ␉with the value in the memory location referenced by <code>__theValue</code>,␊ |
247 | ␉storing the result back to that memory location atomically.␊ |
248 | ␊ |
249 | ␉This function is equivalent to {@link OSAtomicOr32Orig}␊ |
250 | ␉except that it also introduces a barrier.␊ |
251 | @result Returns the original value referenced by <code>__theValue</code>.␊ |
252 | */␊ |
253 | int32_t␉OSAtomicOr32OrigBarrier( uint32_t __theMask, volatile uint32_t *__theValue ) __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_3_2);␊ |
254 | ␊ |
255 | ␊ |
256 | ␊ |
257 | ␊ |
258 | /*! @abstract Atomic bitwise AND of two 32-bit values.␊ |
259 | @discussion␊ |
260 | ␉This function performs the bitwise AND of the value given by <code>__theMask</code>␊ |
261 | ␉with the value in the memory location referenced by <code>__theValue</code>,␊ |
262 | ␉storing the result back to that memory location atomically.␊ |
263 | @result Returns the new value.␊ |
264 | */␊ |
265 | int32_t␉OSAtomicAnd32( uint32_t __theMask, volatile uint32_t *__theValue ); ␊ |
266 | ␊ |
267 | ␊ |
268 | /*! @abstract Atomic bitwise AND of two 32-bit values with barrier.␊ |
269 | @discussion␊ |
270 | ␉This function performs the bitwise AND of the value given by <code>__theMask</code>␊ |
271 | ␉with the value in the memory location referenced by <code>__theValue</code>,␊ |
272 | ␉storing the result back to that memory location atomically.␊ |
273 | ␊ |
274 | ␉This function is equivalent to {@link OSAtomicAnd32}␊ |
275 | ␉except that it also introduces a barrier.␊ |
276 | @result Returns the new value.␊ |
277 | */␊ |
278 | int32_t␉OSAtomicAnd32Barrier( uint32_t __theMask, volatile uint32_t *__theValue ); ␊ |
279 | ␊ |
280 | ␊ |
281 | /*! @abstract Atomic bitwise AND of two 32-bit values returning original.␊ |
282 | @discussion␊ |
283 | ␉This function performs the bitwise AND of the value given by <code>__theMask</code>␊ |
284 | ␉with the value in the memory location referenced by <code>__theValue</code>,␊ |
285 | ␉storing the result back to that memory location atomically.␊ |
286 | @result Returns the original value referenced by <code>__theValue</code>.␊ |
287 | */␊ |
288 | int32_t␉OSAtomicAnd32Orig( uint32_t __theMask, volatile uint32_t *__theValue ) __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_3_2);␊ |
289 | ␊ |
290 | ␊ |
291 | /*! @abstract Atomic bitwise AND of two 32-bit values returning original with barrier.␊ |
292 | @discussion␊ |
293 | ␉This function performs the bitwise AND of the value given by <code>__theMask</code>␊ |
294 | ␉with the value in the memory location referenced by <code>__theValue</code>,␊ |
295 | ␉storing the result back to that memory location atomically.␊ |
296 | ␊ |
297 | ␉This function is equivalent to {@link OSAtomicAnd32Orig}␊ |
298 | ␉except that it also introduces a barrier.␊ |
299 | @result Returns the original value referenced by <code>__theValue</code>.␊ |
300 | */␊ |
301 | int32_t␉OSAtomicAnd32OrigBarrier( uint32_t __theMask, volatile uint32_t *__theValue ) __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_3_2);␊ |
302 | ␊ |
303 | ␊ |
304 | ␊ |
305 | ␊ |
306 | /*! @abstract Atomic bitwise XOR of two 32-bit values.␊ |
307 | @discussion␊ |
308 | ␉This function performs the bitwise XOR of the value given by <code>__theMask</code>␊ |
309 | ␉with the value in the memory location referenced by <code>__theValue</code>,␊ |
310 | ␉storing the result back to that memory location atomically.␊ |
311 | @result Returns the new value.␊ |
312 | */␊ |
313 | int32_t␉OSAtomicXor32( uint32_t __theMask, volatile uint32_t *__theValue );␊ |
314 | ␊ |
315 | ␊ |
316 | /*! @abstract Atomic bitwise XOR of two 32-bit values with barrier.␊ |
317 | @discussion␊ |
318 | ␉This function performs the bitwise XOR of the value given by <code>__theMask</code>␊ |
319 | ␉with the value in the memory location referenced by <code>__theValue</code>,␊ |
320 | ␉storing the result back to that memory location atomically.␊ |
321 | ␊ |
322 | ␉This function is equivalent to {@link OSAtomicXor32}␊ |
323 | ␉except that it also introduces a barrier.␊ |
324 | @result Returns the new value.␊ |
325 | */␊ |
326 | int32_t␉OSAtomicXor32Barrier( uint32_t __theMask, volatile uint32_t *__theValue );␊ |
327 | ␊ |
328 | ␊ |
329 | /*! @abstract Atomic bitwise XOR of two 32-bit values returning original.␊ |
330 | @discussion␊ |
331 | ␉This function performs the bitwise XOR of the value given by <code>__theMask</code>␊ |
332 | ␉with the value in the memory location referenced by <code>__theValue</code>,␊ |
333 | ␉storing the result back to that memory location atomically.␊ |
334 | @result Returns the original value referenced by <code>__theValue</code>.␊ |
335 | */␊ |
336 | int32_t␉OSAtomicXor32Orig( uint32_t __theMask, volatile uint32_t *__theValue ) __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_3_2);␊ |
337 | ␊ |
338 | ␊ |
339 | /*! @abstract Atomic bitwise XOR of two 32-bit values returning original with barrier.␊ |
340 | @discussion␊ |
341 | ␉This function performs the bitwise XOR of the value given by <code>__theMask</code>␊ |
342 | ␉with the value in the memory location referenced by <code>__theValue</code>,␊ |
343 | ␉storing the result back to that memory location atomically.␊ |
344 | ␊ |
345 | ␉This function is equivalent to {@link OSAtomicXor32Orig}␊ |
346 | ␉except that it also introduces a barrier.␊ |
347 | @result Returns the original value referenced by <code>__theValue</code>.␊ |
348 | */␊ |
349 | int32_t␉OSAtomicXor32OrigBarrier( uint32_t __theMask, volatile uint32_t *__theValue ) __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_3_2);␊ |
350 | ␊ |
351 | ␊ |
352 | /*! @group Compare and swap␊ |
 * Functions in this group return true if the swap occurred. There are several versions,
354 | * depending on data type and on whether or not a barrier is used.␊ |
355 | */␊ |
356 | ␊ |
357 | ␊ |
358 | /*! @abstract Compare and swap for 32-bit values.␊ |
359 | @discussion␊ |
360 | ␉This function compares the value in <code>__oldValue</code> to the value␊ |
361 | ␉in the memory location referenced by <code>__theValue</code>. If the values␊ |
362 | ␉match, this function stores the value from <code>__newValue</code> into␊ |
363 | ␉that memory location atomically.␊ |
364 | @result Returns TRUE on a match, FALSE otherwise.␊ |
365 | */␊ |
366 | bool OSAtomicCompareAndSwap32( int32_t __oldValue, int32_t __newValue, volatile int32_t *__theValue );␊ |
367 | ␊ |
368 | ␊ |
369 | /*! @abstract Compare and swap for 32-bit values with barrier.␊ |
370 | @discussion␊ |
371 | ␉This function compares the value in <code>__oldValue</code> to the value␊ |
372 | ␉in the memory location referenced by <code>__theValue</code>. If the values␊ |
373 | ␉match, this function stores the value from <code>__newValue</code> into␊ |
374 | ␉that memory location atomically.␊ |
375 | ␊ |
376 | ␉This function is equivalent to {@link OSAtomicCompareAndSwap32}␊ |
377 | ␉except that it also introduces a barrier.␊ |
378 | @result Returns TRUE on a match, FALSE otherwise.␊ |
379 | */␊ |
380 | bool OSAtomicCompareAndSwap32Barrier( int32_t __oldValue, int32_t __newValue, volatile int32_t *__theValue );␊ |
381 | ␊ |
382 | ␊ |
383 | /*! @abstract Compare and swap pointers.␊ |
384 | @discussion␊ |
385 | ␉This function compares the pointer stored in <code>__oldValue</code> to the pointer␊ |
386 | ␉in the memory location referenced by <code>__theValue</code>. If the pointers␊ |
387 | ␉match, this function stores the pointer from <code>__newValue</code> into␊ |
388 | ␉that memory location atomically.␊ |
389 | @result Returns TRUE on a match, FALSE otherwise.␊ |
390 | */␊ |
391 | bool␉OSAtomicCompareAndSwapPtr( void *__oldValue, void *__newValue, void * volatile *__theValue ) __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0);␊ |
392 | ␊ |
393 | ␊ |
394 | /*! @abstract Compare and swap pointers with barrier.␊ |
395 | @discussion␊ |
396 | ␉This function compares the pointer stored in <code>__oldValue</code> to the pointer␊ |
397 | ␉in the memory location referenced by <code>__theValue</code>. If the pointers␊ |
398 | ␉match, this function stores the pointer from <code>__newValue</code> into␊ |
399 | ␉that memory location atomically.␊ |
400 | ␊ |
401 | ␉This function is equivalent to {@link OSAtomicCompareAndSwapPtr}␊ |
402 | ␉except that it also introduces a barrier.␊ |
403 | @result Returns TRUE on a match, FALSE otherwise.␊ |
404 | */␊ |
405 | bool␉OSAtomicCompareAndSwapPtrBarrier( void *__oldValue, void *__newValue, void * volatile *__theValue ) __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0);␊ |
406 | ␊ |
407 | ␊ |
408 | /*! @abstract Compare and swap for <code>int</code> values.␊ |
409 | @discussion␊ |
410 | ␉This function compares the value in <code>__oldValue</code> to the value␊ |
411 | ␉in the memory location referenced by <code>__theValue</code>. If the values␊ |
412 | ␉match, this function stores the value from <code>__newValue</code> into␊ |
413 | ␉that memory location atomically.␊ |
414 | ␊ |
415 | ␉This function is equivalent to {@link OSAtomicCompareAndSwap32}.␊ |
416 | @result Returns TRUE on a match, FALSE otherwise.␊ |
417 | */␊ |
418 | bool␉OSAtomicCompareAndSwapInt( int __oldValue, int __newValue, volatile int *__theValue ) __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0);␊ |
419 | ␊ |
420 | ␊ |
421 | /*! @abstract Compare and swap for <code>int</code> values.␊ |
422 | @discussion␊ |
423 | ␉This function compares the value in <code>__oldValue</code> to the value␊ |
424 | ␉in the memory location referenced by <code>__theValue</code>. If the values␊ |
425 | ␉match, this function stores the value from <code>__newValue</code> into␊ |
426 | ␉that memory location atomically.␊ |
427 | ␊ |
428 | ␉This function is equivalent to {@link OSAtomicCompareAndSwapInt}␊ |
429 | ␉except that it also introduces a barrier.␊ |
430 | ␊ |
431 | ␉This function is equivalent to {@link OSAtomicCompareAndSwap32Barrier}.␊ |
432 | @result Returns TRUE on a match, FALSE otherwise.␊ |
433 | */␊ |
434 | bool␉OSAtomicCompareAndSwapIntBarrier( int __oldValue, int __newValue, volatile int *__theValue ) __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0);␊ |
435 | ␊ |
436 | ␊ |
437 | /*! @abstract Compare and swap for <code>long</code> values.␊ |
438 | @discussion␊ |
439 | ␉This function compares the value in <code>__oldValue</code> to the value␊ |
440 | ␉in the memory location referenced by <code>__theValue</code>. If the values␊ |
441 | ␉match, this function stores the value from <code>__newValue</code> into␊ |
442 | ␉that memory location atomically.␊ |
443 | ␊ |
444 | ␉This function is equivalent to {@link OSAtomicCompareAndSwap32} on 32-bit architectures, ␊ |
445 | ␉or {@link OSAtomicCompareAndSwap64} on 64-bit architectures.␊ |
446 | @result Returns TRUE on a match, FALSE otherwise.␊ |
447 | */␊ |
448 | bool␉OSAtomicCompareAndSwapLong( long __oldValue, long __newValue, volatile long *__theValue ) __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0);␊ |
449 | ␊ |
450 | ␊ |
451 | /*! @abstract Compare and swap for <code>long</code> values.␊ |
452 | @discussion␊ |
453 | ␉This function compares the value in <code>__oldValue</code> to the value␊ |
454 | ␉in the memory location referenced by <code>__theValue</code>. If the values␊ |
455 | ␉match, this function stores the value from <code>__newValue</code> into␊ |
456 | ␉that memory location atomically.␊ |
457 | ␊ |
458 | ␉This function is equivalent to {@link OSAtomicCompareAndSwapLong}␊ |
459 | ␉except that it also introduces a barrier.␊ |
460 | ␊ |
461 | ␉This function is equivalent to {@link OSAtomicCompareAndSwap32} on 32-bit architectures, ␊ |
462 | ␉or {@link OSAtomicCompareAndSwap64} on 64-bit architectures.␊ |
463 | @result Returns TRUE on a match, FALSE otherwise.␊ |
464 | */␊ |
465 | bool␉OSAtomicCompareAndSwapLongBarrier( long __oldValue, long __newValue, volatile long *__theValue ) __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0);␊ |
466 | ␊ |
467 | ␊ |
/*! @abstract Compare and swap for <code>int64_t</code> values.
469 | @discussion␊ |
470 | ␉This function compares the value in <code>__oldValue</code> to the value␊ |
471 | ␉in the memory location referenced by <code>__theValue</code>. If the values␊ |
472 | ␉match, this function stores the value from <code>__newValue</code> into␊ |
473 | ␉that memory location atomically.␊ |
474 | @result Returns TRUE on a match, FALSE otherwise.␊ |
475 | */␊ |
476 | bool OSAtomicCompareAndSwap64( int64_t __oldValue, int64_t __newValue, volatile int64_t *__theValue );␊ |
477 | ␊ |
478 | ␊ |
/*! @abstract Compare and swap for <code>int64_t</code> values with barrier.
480 | @discussion␊ |
481 | ␉This function compares the value in <code>__oldValue</code> to the value␊ |
482 | ␉in the memory location referenced by <code>__theValue</code>. If the values␊ |
483 | ␉match, this function stores the value from <code>__newValue</code> into␊ |
484 | ␉that memory location atomically.␊ |
485 | ␊ |
486 | ␉This function is equivalent to {@link OSAtomicCompareAndSwap64}␊ |
487 | ␉except that it also introduces a barrier.␊ |
488 | @result Returns TRUE on a match, FALSE otherwise.␊ |
489 | */␊ |
490 | bool OSAtomicCompareAndSwap64Barrier( int64_t __oldValue, int64_t __newValue, volatile int64_t *__theValue ) __OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_3_2);␊ |
491 | ␊ |
492 | ␊ |
493 | /* Test and set. They return the original value of the bit, and operate on bit (0x80>>(n&7))␊ |
494 | * in byte ((char*)theAddress + (n>>3)).␊ |
495 | */␊ |
496 | /*! @abstract Atomic test and set␊ |
497 | @discussion␊ |
498 | ␉This function tests a bit in the value referenced by <code>__theAddress</code>␊ |
499 | ␉and if it is not set, sets it. The bit is chosen by the value of <code>__n</code>.␊ |
500 | ␉The bits are numbered in order beginning with bit 1 as the lowest order bit.␊ |
501 | ␊ |
502 | ␉For example, if <code>__theAddress</code> points to a 64-bit value,␊ |
503 | ␉to compare the value of the highest bit, you would specify <code>64</code> for␊ |
504 | ␉<code>__n</code>.␊ |
505 | @result␊ |
506 | ␉Returns the original value of the bit being tested.␊ |
507 | */␊ |
508 | bool OSAtomicTestAndSet( uint32_t __n, volatile void *__theAddress );␊ |
509 | ␊ |
510 | ␊ |
511 | /*! @abstract Atomic test and set with barrier␊ |
512 | @discussion␊ |
513 | ␉This function tests a bit in the value referenced by <code>__theAddress</code>␊ |
514 | ␉and if it is not set, sets it. The bit is chosen by the value of <code>__n</code>.␊ |
515 | ␉The bits are numbered in order beginning with bit 1 as the lowest order bit.␊ |
516 | ␊ |
517 | ␉For example, if <code>__theAddress</code> points to a 64-bit value,␊ |
518 | ␉to compare the value of the highest bit, you would specify <code>64</code> for␊ |
519 | ␉<code>__n</code>.␊ |
520 | ␊ |
521 | ␉This function is equivalent to {@link OSAtomicTestAndSet}␊ |
522 | ␉except that it also introduces a barrier.␊ |
523 | @result␊ |
524 | ␉Returns the original value of the bit being tested.␊ |
525 | */␊ |
526 | ␊ |
527 | bool OSAtomicTestAndSetBarrier( uint32_t __n, volatile void *__theAddress );␊ |
528 | ␊ |
529 | ␊ |
530 | ␊ |
531 | /*! @abstract Atomic test and clear␊ |
532 | @discussion␊ |
533 | ␉This function tests a bit in the value referenced by <code>__theAddress</code>␊ |
534 | ␉and if it is not cleared, clears it. The bit is chosen by the value of <code>__n</code>.␊ |
535 | ␉The bits are numbered in order beginning with bit 1 as the lowest order bit.␊ |
536 | ␊ |
537 | ␉For example, if <code>__theAddress</code> points to a 64-bit value,␊ |
538 | ␉to compare the value of the highest bit, you would specify <code>64</code> for␊ |
539 | ␉<code>__n</code>.␊ |
540 | @result␊ |
541 | ␉Returns the original value of the bit being tested.␊ |
542 | */␊ |
543 | bool OSAtomicTestAndClear( uint32_t __n, volatile void *__theAddress );␊ |
544 | ␊ |
545 | ␊ |
/*! @abstract Atomic test and clear with barrier
547 | @discussion␊ |
548 | ␉This function tests a bit in the value referenced by <code>__theAddress</code>␊ |
549 | ␉and if it is not cleared, clears it. The bit is chosen by the value of <code>__n</code>.␊ |
550 | ␉The bits are numbered in order beginning with bit 1 as the lowest order bit.␊ |
551 | ␊ |
552 | ␉For example, if <code>__theAddress</code> points to a 64-bit value,␊ |
553 | ␉to compare the value of the highest bit, you would specify <code>64</code> for␊ |
554 | ␉<code>__n</code>.␊ |
555 | ␊ |
	This function is equivalent to {@link OSAtomicTestAndClear}
557 | ␉except that it also introduces a barrier.␊ |
558 | @result␊ |
559 | ␉Returns the original value of the bit being tested.␊ |
560 | */␊ |
561 | bool OSAtomicTestAndClearBarrier( uint32_t __n, volatile void *__theAddress );␊ |
562 | ␊ |
563 | ␊ |
564 | /*! @group Spinlocks␊ |
565 | * These spinlocks use memory barriers as required to synchronize access to shared␊ |
566 | * memory protected by the lock.␊ |
567 | */␊ |
568 | ␊ |
569 | /*! @abstract The default value for an <code>OSSpinLock</code>.␊ |
570 | @discussion␊ |
571 | ␉The convention is that unlocked is zero, locked is nonzero.␊ |
572 | */␊ |
573 | #define␉OS_SPINLOCK_INIT 0␊ |
574 | ␊ |
575 | ␊ |
576 | /*! @abstract Data type for a spinlock.␊ |
577 | @discussion␊ |
578 | ␉You should always initialize a spinlock to {@link OS_SPINLOCK_INIT} before␊ |
579 | ␉using it.␊ |
580 | */␊ |
581 | typedef int32_t OSSpinLock;␊ |
582 | ␊ |
583 | ␊ |
584 | /*! @abstract Locks a spinlock if it would not block␊ |
585 | @result␊ |
586 | ␉Returns <code>false</code> if the lock was already held by another thread,␊ |
587 | ␉<code>true</code> if it took the lock successfully. ␊ |
588 | */␊ |
589 | bool OSSpinLockTry( volatile OSSpinLock *__lock );␊ |
590 | ␊ |
591 | ␊ |
592 | /*! @abstract Locks a spinlock␊ |
593 | @discussion␊ |
594 | ␉Although the lock operation spins, it employs various strategies␊ |
595 | to back off if the lock is held, making it immune to most priority-inversion␊ |
596 | ␉livelocks.␊ |
597 | */␊ |
598 | void OSSpinLockLock( volatile OSSpinLock *__lock );␊ |
599 | ␊ |
600 | ␊ |
601 | /*! @abstract Unlocks a spinlock */␊ |
602 | void OSSpinLockUnlock( volatile OSSpinLock *__lock );␊ |
603 | ␊ |
604 | ␊ |
605 | /*! @group Lockless atomic enqueue and dequeue␊ |
606 | * These routines manipulate singly-linked LIFO lists.␊ |
607 | */␊ |
608 | ␊ |
/*! @abstract The data structure for a queue head.
    @discussion
	You should always initialize a queue head structure with the
	initialization vector {@link OS_ATOMIC_QUEUE_INIT} before use.

	The fields are opaque implementation details; clients must not
	access them directly.
 */
#if defined(__x86_64__)

/* NOTE(review): the 16-byte alignment is presumably required by the
 * double-wide atomic instructions used on x86-64 — confirm against the
 * implementation. */
typedef volatile struct {
	void	*opaque1;
	long	 opaque2;
} __attribute__ ((aligned (16))) OSQueueHead;

#else

typedef volatile struct {
	void	*opaque1;
	long	 opaque2;
} OSQueueHead;

#endif
629 | ␊ |
630 | /*! @abstract The initialization vector for a queue head. */␊ |
631 | #define␉OS_ATOMIC_QUEUE_INIT␉{ NULL, 0 }␊ |
632 | ␊ |
633 | /*! @abstract Enqueue an item onto a list.␊ |
634 | @discussion␊ |
635 | ␉Memory barriers are incorporated as needed to permit thread-safe access␊ |
636 | ␉to the queue element.␊ |
637 | @param __list␊ |
638 | ␉The list on which you want to enqueue the item.␊ |
639 | @param __new␊ |
640 | ␉The item to add.␊ |
641 | @param __offset␊ |
642 | ␉The "offset" parameter is the offset (in bytes) of the link field␊ |
643 | ␉from the beginning of the data structure being queued (<code>__new</code>).␊ |
644 | ␉The link field should be a pointer type.␊ |
645 | ␉The <code>__offset</code> value needs to be same for all enqueuing and␊ |
646 | ␉dequeuing operations on the same queue, even if different structure types␊ |
	are enqueued on that queue. The use of <code>offsetof()</code>, defined in
648 | ␉<code>stddef.h</code> is the common way to specify the <code>__offset</code>␊ |
649 | ␉value.␊ |
650 | */␊ |
651 | void OSAtomicEnqueue( OSQueueHead *__list, void *__new, size_t __offset) __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_4_0);␊ |
652 | ␊ |
653 | ␊ |
654 | /*! @abstract Dequeue an item from a list.␊ |
655 | @discussion␊ |
656 | ␉Memory barriers are incorporated as needed to permit thread-safe access␊ |
657 | ␉to the queue element.␊ |
658 | @param __list␊ |
	The list from which you want to dequeue an item.
660 | @param __offset␊ |
661 | ␉The "offset" parameter is the offset (in bytes) of the link field␊ |
	from the beginning of the data structure being dequeued.
663 | ␉The link field should be a pointer type.␊ |
664 | ␉The <code>__offset</code> value needs to be same for all enqueuing and␊ |
665 | ␉dequeuing operations on the same queue, even if different structure types␊ |
666 | ␉are enqueued on that queue. The use of <code>offsetset()</code>, defined in␊ |
667 | ␉<code>stddef.h</code> is the common way to specify the <code>__offset</code>␊ |
668 | ␉value.␊ |
669 | @result␊ |
670 | ␉Returns the most recently enqueued element, or <code>NULL</code> if the␊ |
671 | ␉list is empty. ␊ |
672 | */␊ |
673 | void* OSAtomicDequeue( OSQueueHead *__list, size_t __offset) __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_4_0);␊ |
674 | ␊ |
675 | #if defined(__x86_64__) || defined(__i386__)␊ |
676 | ␊ |
677 | /*! @group Lockless atomic fifo enqueue and dequeue␊ |
678 | * These routines manipulate singly-linked FIFO lists.␊ |
679 | */␊ |
680 | ␊ |
681 | /*! @abstract The data structure for a fifo queue head.␊ |
682 | @discussion␊ |
683 | ␉You should always initialize a fifo queue head structure with the␊ |
684 | ␉initialization vector {@link OS_ATOMIC_FIFO_QUEUE_INIT} before use.␊ |
685 | */␊ |
#if defined(__x86_64__)

/*
 * The forced 16-byte alignment on x86_64 is presumably required so the
 * implementation can update the head with a double-wide atomic
 * (cmpxchg16b) — NOTE(review): confirm against the implementation before
 * relying on this layout.
 */
typedef	volatile struct {
	void	*opaque1;	/* opaque — managed by the implementation; do not access */
	void	*opaque2;	/* opaque — managed by the implementation; do not access */
	int	 opaque3;	/* opaque — managed by the implementation; do not access */
} __attribute__ ((aligned (16))) OSFifoQueueHead;

#else

/* On i386 the natural alignment of the fields suffices. */
typedef	volatile struct {
	void	*opaque1;	/* opaque — managed by the implementation; do not access */
	void	*opaque2;	/* opaque — managed by the implementation; do not access */
	int	 opaque3;	/* opaque — managed by the implementation; do not access */
} OSFifoQueueHead;

#endif
703 | ␊ |
/*! @abstract The initialization vector for a fifo queue head.
    @discussion
	Assign this to an <code>OSFifoQueueHead</code> before first use, e.g.
	<code>OSFifoQueueHead qhead = OS_ATOMIC_FIFO_QUEUE_INIT;</code> */
#define OS_ATOMIC_FIFO_QUEUE_INIT { NULL, NULL, 0 }
706 | ␊ |
/*! @abstract Enqueue an item onto a list.
    @discussion
	Memory barriers are incorporated as needed to permit thread-safe access
	to the queue element.
    @param __list
	The list on which you want to enqueue the item.
    @param __new
	The item to add.
    @param __offset
	The "offset" parameter is the offset (in bytes) of the link field
	from the beginning of the data structure being queued (<code>__new</code>).
	The link field should be a pointer type.
	The <code>__offset</code> value needs to be same for all enqueuing and
	dequeuing operations on the same queue, even if different structure types
	are enqueued on that queue. The use of <code>offsetof()</code>, defined in
	<code>stddef.h</code>, is the common way to specify the <code>__offset</code>
	value.
 */
void OSAtomicFifoEnqueue( OSFifoQueueHead *__list, void *__new, size_t __offset) __OSX_AVAILABLE_STARTING(__MAC_10_7, __IPHONE_NA);
726 | ␊ |
/*! @abstract Dequeue an item from a list.
    @discussion
	Memory barriers are incorporated as needed to permit thread-safe access
	to the queue element.
    @param __list
	The list from which you want to dequeue an item.
    @param __offset
	The "offset" parameter is the offset (in bytes) of the link field
	from the beginning of the data structure being dequeued.
	The link field should be a pointer type.
	The <code>__offset</code> value needs to be same for all enqueuing and
	dequeuing operations on the same queue, even if different structure types
	are enqueued on that queue. The use of <code>offsetof()</code>, defined in
	<code>stddef.h</code>, is the common way to specify the <code>__offset</code>
	value.
    @result
	Returns the oldest enqueued element, or <code>NULL</code> if the
	list is empty.
 */
void* OSAtomicFifoDequeue( OSFifoQueueHead *__list, size_t __offset) __OSX_AVAILABLE_STARTING(__MAC_10_7, __IPHONE_NA);
747 | ␊ |
748 | #endif /* __i386__ || __x86_64__ */␊ |
749 | ␊ |
/*! @group Memory barriers */

/*! @abstract Memory barrier.
    @discussion
	This function serves as both a read and write barrier: memory accesses
	issued before the call complete before any memory access issued after
	the call.
 */
void OSMemoryBarrier( void );
757 | ␊ |
758 | __END_DECLS␊ |
759 | ␊ |
760 | #endif /* _OSATOMIC_H_ */␊ |
761 | |