Chameleon SVN Source Tree

Root/branches/ErmaC/Trunk/i386/include/libkern/ppc/OSByteOrder.h

/*
 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#ifndef _OS_OSBYTEORDERPPC_H
#define _OS_OSBYTEORDERPPC_H

#include <stdint.h>

#if !defined(OS_INLINE)
# if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#        define OS_INLINE static inline
# elif defined(__MWERKS__) || defined(__cplusplus)
#        define OS_INLINE static inline
# else
#        define OS_INLINE static __inline__
# endif
#endif

/* Functions for byte reversed loads. */

OS_INLINE
uint16_t
OSReadSwapInt16(
    const volatile void * base,
    uintptr_t             byteOffset
)
{
    uint16_t result;
    volatile uint16_t *addr = (volatile uint16_t *)((uintptr_t)base + byteOffset);

#if defined(__llvm__)
    result = *addr;
    result = ((result << 8) | (result >> 8));
#else
    __asm__ ("lhbrx %0, %2, %1"
             : "=r" (result)
             : "r" (base), "bO" (byteOffset), "m" (*addr));
#endif

    return result;
}

OS_INLINE
uint32_t
OSReadSwapInt32(
    const volatile void * base,
    uintptr_t             byteOffset
)
{
    uint32_t result;
    volatile uint32_t *addr = (volatile uint32_t *)((uintptr_t)base + byteOffset);

#if defined(__llvm__)
    result = __builtin_bswap32(*addr);
#else
    __asm__ ("lwbrx %0, %2, %1"
             : "=r" (result)
             : "r" (base), "bO" (byteOffset), "m" (*addr));
#endif

    return result;
}

OS_INLINE
uint64_t
OSReadSwapInt64(
    const volatile void * base,
    uintptr_t             byteOffset
)
{
    volatile uint64_t *addr = (volatile uint64_t *)((uintptr_t)base + byteOffset);
    union {
        uint64_t u64;
        uint32_t u32[2];
    } u;

#if defined(__llvm__)
    u.u64 = __builtin_bswap64(*addr);
#else
    __asm__ ("lwbrx %0, %3, %2\n\t"
             "lwbrx %1, %4, %2"
             : "=&r" (u.u32[1]), "=r" (u.u32[0])
             : "r" (base), "bO" (byteOffset), "b" (byteOffset + 4), "m" (*addr));
#endif

    return u.u64;
}

/* Functions for byte reversed stores. */

OS_INLINE
void
OSWriteSwapInt16(
    volatile void * base,
    uintptr_t       byteOffset,
    uint16_t        data
)
{
    volatile uint16_t *addr = (volatile uint16_t *)((uintptr_t)base + byteOffset);

#if defined(__llvm__)
    *addr = ((data >> 8) | (data << 8));
#else
    __asm__ ("sthbrx %1, %3, %2"
             : "=m" (*addr)
             : "r" (data), "r" (base), "bO" (byteOffset));
#endif
}

OS_INLINE
void
OSWriteSwapInt32(
    volatile void * base,
    uintptr_t       byteOffset,
    uint32_t        data
)
{
    volatile uint32_t *addr = (volatile uint32_t *)((uintptr_t)base + byteOffset);

#if defined(__llvm__)
    *addr = __builtin_bswap32(data);
#else
    __asm__ ("stwbrx %1, %3, %2"
             : "=m" (*addr)
             : "r" (data), "r" (base), "bO" (byteOffset));
#endif
}

OS_INLINE
void
OSWriteSwapInt64(
    volatile void * base,
    uintptr_t       byteOffset,
    uint64_t        data
)
{
    volatile uint64_t *addr = (volatile uint64_t *)((uintptr_t)base + byteOffset);

#if defined(__llvm__)
    *addr = __builtin_bswap64(data);
#else
    uint32_t hi = (uint32_t)(data >> 32);
    uint32_t lo = (uint32_t)(data & 0xffffffff);

    __asm__ ("stwbrx %1, %4, %3\n\t"
             "stwbrx %2, %5, %3"
             : "=m" (*addr)
             : "r" (lo), "r" (hi), "r" (base), "bO" (byteOffset), "b" (byteOffset + 4));
#endif
}

/* Generic byte swapping functions. */

OS_INLINE
uint16_t
_OSSwapInt16(
    uint16_t data
)
{
    return OSReadSwapInt16(&data, 0);
}

OS_INLINE
uint32_t
_OSSwapInt32(
    uint32_t data
)
{
    return OSReadSwapInt32(&data, 0);
}

OS_INLINE
uint64_t
_OSSwapInt64(
    uint64_t data
)
{
    return OSReadSwapInt64(&data, 0);
}

#endif /* ! _OS_OSBYTEORDERPPC_H */
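For context, a minimal usage sketch of the helpers declared above (not part of the header itself): it assumes a big-endian PowerPC build where the file is reachable as <libkern/ppc/OSByteOrder.h>; the include path and buffer contents are illustrative assumptions, not taken from the Chameleon tree.

/* Usage sketch (illustrative): read and write little-endian fields on a
 * big-endian PowerPC host using the byte-reversed load/store helpers. */
#include <stdint.h>
#include <stdio.h>
#include <libkern/ppc/OSByteOrder.h>   /* assumed include path */

int main(void)
{
    uint32_t storage[2] = { 0, 0 };    /* naturally aligned backing store */
    uint8_t *buf = (uint8_t *)storage;

    /* Bytes 0..3 hold the value 0x12345678 stored little-endian. */
    buf[0] = 0x78; buf[1] = 0x56; buf[2] = 0x34; buf[3] = 0x12;

    /* Byte-reversed load: on big-endian PPC this reads the field as 0x12345678. */
    uint32_t value = OSReadSwapInt32(buf, 0);

    /* Byte-reversed store: write the same value little-endian at offset 4. */
    OSWriteSwapInt32(buf, 4, value);

    /* Generic in-register swap: 0xABCD becomes 0xCDAB. */
    uint16_t swapped = _OSSwapInt16(0xABCD);

    printf("value = 0x%08x, swapped = 0x%04x\n", (unsigned)value, (unsigned)swapped);
    return 0;
}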


Revision: 1622