Chameleon Svn Source Tree

Root/branches/mozodojo/i386/include/libkern/OSAtomic.h

/*
 * Copyright (c) 2004-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#ifndef _OSATOMIC_H_
#define _OSATOMIC_H_

#include <stddef.h>
#include <sys/cdefs.h>
#include <stdint.h>
#include <stdbool.h>

/* These are the preferred versions of the atomic and synchronization operations.
 * Their implementation is customized at boot time for the platform, including
 * late-breaking errata fixes as necessary. They are thread safe.
 *
 * WARNING: all addresses passed to these functions must be "naturally aligned",
 * i.e. int32_t's must be 32-bit aligned (low 2 bits of address zero), and
 * int64_t's must be 64-bit aligned (low 3 bits of address zero).
 *
 * Note that some versions of the atomic functions incorporate memory barriers,
 * and some do not. Barriers strictly order memory access on a weakly-ordered
 * architecture such as PPC. All loads and stores executed in sequential program
 * order before the barrier will complete before any load or store executed after
 * the barrier. On a uniprocessor, the barrier operation is typically a nop.
 * On a multiprocessor, the barrier can be quite expensive on some platforms,
 * e.g. PPC.
 *
 * Most code will want to use the barrier functions to ensure that memory shared
 * between threads is properly synchronized. For example, if you want to initialize
 * a shared data structure and then atomically increment a variable to indicate
 * that the initialization is complete, then you must use OSAtomicIncrement32Barrier()
 * to ensure that the stores to your data structure complete before the atomic add.
 * Likewise, the consumer of that data structure must use OSAtomicDecrement32Barrier()
 * in order to ensure that their loads of the structure are not executed before
 * the atomic decrement. On the other hand, if you are simply incrementing a global
 * counter, then it is safe and potentially faster to use OSAtomicIncrement32().
 *
 * If you are unsure which version to use, prefer the barrier variants, as they are
 * safer.
 *
 * The spinlock and queue operations always incorporate a barrier.
 */
__BEGIN_DECLS


/* Arithmetic functions. They return the new value.
 */
int32_t OSAtomicAdd32( int32_t __theAmount, volatile int32_t *__theValue );
int32_t OSAtomicAdd32Barrier( int32_t __theAmount, volatile int32_t *__theValue );

__inline static
int32_t OSAtomicIncrement32( volatile int32_t *__theValue )
		{ return OSAtomicAdd32( 1, __theValue); }
__inline static
int32_t OSAtomicIncrement32Barrier( volatile int32_t *__theValue )
		{ return OSAtomicAdd32Barrier( 1, __theValue); }

__inline static
int32_t OSAtomicDecrement32( volatile int32_t *__theValue )
		{ return OSAtomicAdd32( -1, __theValue); }
__inline static
int32_t OSAtomicDecrement32Barrier( volatile int32_t *__theValue )
		{ return OSAtomicAdd32Barrier( -1, __theValue); }

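/* Illustrative usage sketch (not part of the original header): the
 * publication pattern described in the comment at the top of this file.
 * The producer's stores to the shared record must be visible before the
 * ready count changes, and the consumer's loads must not be hoisted above
 * its decrement, so the Barrier variants are used on both sides.
 * ExamplePublish/ExampleConsume and their parameters are hypothetical;
 * the consumer assumes it has already observed a positive count. */
__inline static void
ExamplePublish( int *__sharedRecord, volatile int32_t *__readyCount )
{
	__sharedRecord[0] = 42;                      /* initialize the shared structure   */
	OSAtomicIncrement32Barrier( __readyCount );  /* stores above complete before this */
}

__inline static int
ExampleConsume( const int *__sharedRecord, volatile int32_t *__readyCount )
{
	OSAtomicDecrement32Barrier( __readyCount );  /* loads below stay after this       */
	return __sharedRecord[0];
}
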
#if defined(__ppc64__) || defined(__i386__) || defined(__x86_64__) || defined(__arm__)

int64_t OSAtomicAdd64( int64_t __theAmount, volatile int64_t *__theValue );
int64_t OSAtomicAdd64Barrier( int64_t __theAmount, volatile int64_t *__theValue );

__inline static
int64_t OSAtomicIncrement64( volatile int64_t *__theValue )
		{ return OSAtomicAdd64( 1, __theValue); }
__inline static
int64_t OSAtomicIncrement64Barrier( volatile int64_t *__theValue )
		{ return OSAtomicAdd64Barrier( 1, __theValue); }

__inline static
int64_t OSAtomicDecrement64( volatile int64_t *__theValue )
		{ return OSAtomicAdd64( -1, __theValue); }
__inline static
int64_t OSAtomicDecrement64Barrier( volatile int64_t *__theValue )
		{ return OSAtomicAdd64Barrier( -1, __theValue); }

#endif /* defined(__ppc64__) || defined(__i386__) || defined(__x86_64__) || defined(__arm__) */


/* Boolean functions (and, or, xor). These come in four versions for each operation:
 * with and without barriers, and returning either the old or the new value. The
 * "Orig" versions return the original value (i.e. before the operation); the
 * non-Orig versions return the value after the operation. All are layered on top of
 * compare-and-swap.
 */
int32_t OSAtomicOr32( uint32_t __theMask, volatile uint32_t *__theValue );
int32_t OSAtomicOr32Barrier( uint32_t __theMask, volatile uint32_t *__theValue );
int32_t OSAtomicOr32Orig( uint32_t __theMask, volatile uint32_t *__theValue );
int32_t OSAtomicOr32OrigBarrier( uint32_t __theMask, volatile uint32_t *__theValue );

int32_t OSAtomicAnd32( uint32_t __theMask, volatile uint32_t *__theValue );
int32_t OSAtomicAnd32Barrier( uint32_t __theMask, volatile uint32_t *__theValue );
int32_t OSAtomicAnd32Orig( uint32_t __theMask, volatile uint32_t *__theValue );
int32_t OSAtomicAnd32OrigBarrier( uint32_t __theMask, volatile uint32_t *__theValue );

int32_t OSAtomicXor32( uint32_t __theMask, volatile uint32_t *__theValue );
int32_t OSAtomicXor32Barrier( uint32_t __theMask, volatile uint32_t *__theValue );
int32_t OSAtomicXor32Orig( uint32_t __theMask, volatile uint32_t *__theValue );
int32_t OSAtomicXor32OrigBarrier( uint32_t __theMask, volatile uint32_t *__theValue );

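/* Illustrative usage sketch (not part of the original header): the "Orig"
 * variants return the value from before the operation, so a caller can set
 * a flag bit and simultaneously learn whether another thread had already
 * set it.  ExampleClaimInit and its flag mask are hypothetical. */
enum { kExampleInitDoneBit = 0x00000001 };

__inline static bool
ExampleClaimInit( volatile uint32_t *__flags )
{
	/* True only for the single thread that actually flipped the bit. */
	return (OSAtomicOr32OrigBarrier( kExampleInitDoneBit, __flags ) & kExampleInitDoneBit) == 0;
}
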

/* Compare and swap. They return true if the swap occurred. There are several versions,
 * depending on data type and whether or not a barrier is used.
 */
bool OSAtomicCompareAndSwap32( int32_t __oldValue, int32_t __newValue, volatile int32_t *__theValue );
bool OSAtomicCompareAndSwap32Barrier( int32_t __oldValue, int32_t __newValue, volatile int32_t *__theValue );
bool OSAtomicCompareAndSwapPtr( void *__oldValue, void *__newValue, void * volatile *__theValue );
bool OSAtomicCompareAndSwapPtrBarrier( void *__oldValue, void *__newValue, void * volatile *__theValue );
bool OSAtomicCompareAndSwapInt( int __oldValue, int __newValue, volatile int *__theValue );
bool OSAtomicCompareAndSwapIntBarrier( int __oldValue, int __newValue, volatile int *__theValue );
bool OSAtomicCompareAndSwapLong( long __oldValue, long __newValue, volatile long *__theValue );
bool OSAtomicCompareAndSwapLongBarrier( long __oldValue, long __newValue, volatile long *__theValue );

#if defined(__ppc64__) || defined(__i386__) || defined(__x86_64__) || defined(__arm__)

bool OSAtomicCompareAndSwap64( int64_t __oldValue, int64_t __newValue, volatile int64_t *__theValue );
bool OSAtomicCompareAndSwap64Barrier( int64_t __oldValue, int64_t __newValue, volatile int64_t *__theValue );

#endif /* defined(__ppc64__) || defined(__i386__) || defined(__x86_64__) || defined(__arm__) */
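
/* Illustrative usage sketch (not part of the original header): the usual
 * compare-and-swap retry loop.  Take a fresh snapshot, compute the desired
 * new value, and retry whenever another thread won the race.
 * ExampleAtomicMax32 is a hypothetical helper. */
__inline static void
ExampleAtomicMax32( int32_t __candidate, volatile int32_t *__theValue )
{
	int32_t __old;
	do {
		__old = *__theValue;
		if (__candidate <= __old)
			return;          /* current value already at least as large */
	} while (!OSAtomicCompareAndSwap32Barrier( __old, __candidate, __theValue ));
}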


/* Test and set. They return the original value of the bit, and operate on bit (0x80>>(n&7))
 * in byte ((char*)theAddress + (n>>3)).
 */
bool OSAtomicTestAndSet( uint32_t __n, volatile void *__theAddress );
bool OSAtomicTestAndSetBarrier( uint32_t __n, volatile void *__theAddress );
bool OSAtomicTestAndClear( uint32_t __n, volatile void *__theAddress );
bool OSAtomicTestAndClearBarrier( uint32_t __n, volatile void *__theAddress );
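
/* Illustrative usage sketch (not part of the original header): with the bit
 * numbering above, bit 0 is the most significant bit of the first byte,
 * bit 7 the least significant bit of that byte, bit 8 the most significant
 * bit of the second byte, and so on.  ExampleMarkSlotUsed is a hypothetical
 * helper for claiming a slot in a bitmap. */
__inline static bool
ExampleMarkSlotUsed( uint32_t __slot, volatile uint8_t *__bitmap )
{
	/* Returns the previous state of the bit: true means the slot had
	 * already been claimed by another thread. */
	return OSAtomicTestAndSetBarrier( __slot, __bitmap );
}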


/* Spinlocks. These use memory barriers as required to synchronize access to shared
 * memory protected by the lock. The lock operation spins, but employs various strategies
 * to back off if the lock is held, making it immune to most priority-inversion livelocks.
 * The try operation immediately returns false if the lock was held, true if it took the
 * lock. The convention is that unlocked is zero, locked is nonzero.
 */
#define OS_SPINLOCK_INIT    0

typedef int32_t OSSpinLock;

bool OSSpinLockTry( volatile OSSpinLock *__lock );
void OSSpinLockLock( volatile OSSpinLock *__lock );
void OSSpinLockUnlock( volatile OSSpinLock *__lock );
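
/* Illustrative usage sketch (not part of the original header): a spinlock
 * protecting a shared counter.  The lock must start out as
 * OS_SPINLOCK_INIT; ExampleLockedIncrement is a hypothetical helper. */
__inline static void
ExampleLockedIncrement( volatile OSSpinLock *__lock, int *__counter )
{
	OSSpinLockLock( __lock );      /* spins, with backoff, until acquired */
	(*__counter)++;                /* critical section                    */
	OSSpinLockUnlock( __lock );
}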


/* Lockless atomic enqueue and dequeue. These routines manipulate singly
 * linked LIFO lists, i.e. a dequeue will return the most recently enqueued
 * element, or NULL if the list is empty. The "offset" parameter is the offset
 * in bytes of the link field within the data structure being queued. The
 * link field should be a pointer type. Memory barriers are incorporated as
 * needed to permit thread-safe access to the queue element.
 */
#if defined(__x86_64__)

typedef volatile struct {
	void *opaque1;
	long  opaque2;
} OSQueueHead __attribute__ ((aligned (16)));

#else

typedef volatile struct {
	void *opaque1;
	long  opaque2;
} OSQueueHead;

#endif

#define OS_ATOMIC_QUEUE_INIT    { NULL, 0 }

void  OSAtomicEnqueue( OSQueueHead *__list, void *__new, size_t __offset);
void* OSAtomicDequeue( OSQueueHead *__list, size_t __offset);
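
/* Illustrative usage sketch (not part of the original header): the "offset"
 * argument is the byte offset of the link pointer inside the queued element,
 * which is most easily written with offsetof() from <stddef.h>.  The
 * ExampleNode type and the helpers below are hypothetical. */
typedef struct ExampleNode {
	struct ExampleNode *link;    /* link field used by the queue */
	int                 payload;
} ExampleNode;

__inline static void
ExamplePushNode( OSQueueHead *__queue, ExampleNode *__node )
{
	OSAtomicEnqueue( __queue, __node, offsetof( ExampleNode, link ) );
}

__inline static ExampleNode *
ExamplePopNode( OSQueueHead *__queue )
{
	/* Returns the most recently enqueued node, or NULL if the list is empty. */
	return (ExampleNode *) OSAtomicDequeue( __queue, offsetof( ExampleNode, link ) );
}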


/* Memory barrier. It is both a read and write barrier.
 */
void OSMemoryBarrier( void );
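
/* Illustrative usage sketch (not part of the original header): an explicit
 * full barrier between a data store and the flag store that publishes it,
 * for cases where the flag is not updated with one of the atomic Barrier
 * variants above.  ExamplePublishWithBarrier is a hypothetical helper. */
__inline static void
ExamplePublishWithBarrier( int *__data, volatile int *__ready )
{
	*__data = 42;
	OSMemoryBarrier();   /* the data store completes before the flag store */
	*__ready = 1;
}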


__END_DECLS

#endif /* _OSATOMIC_H_ */


Revision: 1232