Chameleon Svn Source Tree

Root/branches/cparm/i386/libsaio/cpu_data.h

/*
 * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 *
 */

#ifndef I386_CPU_DATA
#define I386_CPU_DATA

#include <mach/i386/thread_status.h>
#include <mach/i386/vm_param.h>


/*
 * Data structures referenced (anonymously) from per-cpu data:
 */
struct cpu_cons_buffer;
struct cpu_desc_table;
struct mca_state;



#if defined(__i386__)

typedef struct {
    struct i386_tss         *cdi_ktss;
#if MACH_KDB
    struct i386_tss         *cdi_dbtss;
#endif /* MACH_KDB */
    struct __attribute__((packed)) {
        uint16_t                size;
        struct fake_descriptor  *ptr;
    } cdi_gdt, cdi_idt;
    struct fake_descriptor  *cdi_ldt;
    vm_offset_t             cdi_sstk;
} cpu_desc_index_t;

typedef enum {
    TASK_MAP_32BIT,         /* 32-bit, compatibility mode */
    TASK_MAP_64BIT,         /* 64-bit, separate address space */
    TASK_MAP_64BIT_SHARED   /* 64-bit, kernel-shared addr space */
} task_map_t;

#elif defined(__x86_64__)


typedef struct {
    struct x86_64_tss       *cdi_ktss;
#if MACH_KDB
    struct x86_64_tss       *cdi_dbtss;
#endif /* MACH_KDB */
    struct __attribute__((packed)) {
        uint16_t    size;
        void        *ptr;
    } cdi_gdt, cdi_idt;
    struct fake_descriptor  *cdi_ldt;
    vm_offset_t             cdi_sstk;
} cpu_desc_index_t;

typedef enum {
    TASK_MAP_32BIT,         /* 32-bit user, compatibility mode */
    TASK_MAP_64BIT,         /* 64-bit user thread, shared space */
} task_map_t;

#else
#error Unsupported architecture
#endif

/*
 * This structure is used on entry into the (uber-)kernel on syscall from
 * a 64-bit user. It contains the address of the machine state save area
 * for the current thread and a temporary place to save the user's rsp
 * before loading this address into rsp.
 */
typedef struct {
    addr64_t    cu_isf;             /* thread->pcb->iss.isf */
    uint64_t    cu_tmp;             /* temporary scratch */
    addr64_t    cu_user_gs_base;
} cpu_uber_t;
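
/*
 * A minimal sketch of how the 64-bit syscall entry stub might use this
 * structure, per the comment above (an assumption for illustration; the
 * real handler lives in the assembly entry code, and CPU_UBER_TMP /
 * CPU_UBER_ISF stand for assumed genassym-style byte offsets of cu_tmp
 * and cu_isf within the per-cpu area):
 *
 *      swapgs                          // switch %gs to the kernel per-cpu base
 *      mov     %rsp, %gs:CPU_UBER_TMP  // stash the user's rsp in cu_tmp
 *      mov     %gs:CPU_UBER_ISF, %rsp  // load rsp with the save-area address
 */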

/*
 * Per-cpu data.
 *
 * Each processor has a per-cpu data area which is dereferenced through the
 * current_cpu_datap() macro. For speed, the %gs segment is based here, and
 * using this, inlines provide single-instruction access to frequently used
 * members - such as get_cpu_number()/cpu_number(), and get_active_thread()/
 * current_thread().
 *
 * Cpu data owned by another processor can be accessed using the
 * cpu_datap(cpu_number) macro which uses the cpu_data_ptr[] array of per-cpu
 * pointers. A short usage sketch follows the structure definition below.
 */
typedef struct cpu_data
{
    struct cpu_data     *cpu_this;              /* pointer to myself */
    thread_t            cpu_active_thread;
    void                *cpu_int_state;         /* interrupt state */
    vm_offset_t         cpu_active_stack;       /* kernel stack base */
    vm_offset_t         cpu_kernel_stack;       /* kernel stack top */
    vm_offset_t         cpu_int_stack_top;
    int                 cpu_preemption_level;
    int                 cpu_simple_lock_count;
    int                 cpu_interrupt_level;
    int                 cpu_number;             /* Logical CPU */
    int                 cpu_phys_number;        /* Physical CPU */
    uint32_t            cpu_id;                 /* Platform Expert */
    int                 cpu_signals;            /* IPI events */
    int                 cpu_prior_signals;      /* Last set of events,
                                                 * debugging
                                                 */
    int                 cpu_mcount_off;         /* mcount recursion */
    uint32_t            cpu_pending_ast;
    int                 cpu_type;
    int                 cpu_subtype;
    int                 cpu_threadtype;
    int                 cpu_running;
    uint32_t            rtclock_timer;
    boolean_t           cpu_is64bit;
    task_map_t          cpu_task_map;
    volatile addr64_t   cpu_task_cr3;
    volatile addr64_t   cpu_active_cr3;
    addr64_t            cpu_kernel_cr3;
    cpu_uber_t          cpu_uber;
    void                *cpu_chud;
    void                *cpu_console_buf;
    uint32_t            lcpu;
    struct processor    *cpu_processor;
#if NCOPY_WINDOWS > 0
    struct cpu_pmap     *cpu_pmap;
#endif
    struct cpu_desc_table   *cpu_desc_tablep;
    struct fake_descriptor  *cpu_ldtp;
    cpu_desc_index_t    cpu_desc_index;
    int                 cpu_ldt;
#ifdef MACH_KDB
    /* XXX Untested: */
    int                 cpu_db_pass_thru;
    vm_offset_t         cpu_db_stacks;
    void                *cpu_kdb_saved_state;
    spl_t               cpu_kdb_saved_ipl;
    int                 cpu_kdb_is_slave;
    int                 cpu_kdb_active;
#endif /* MACH_KDB */
    boolean_t           cpu_iflag;
    boolean_t           cpu_boot_complete;
    int                 cpu_hibernate;

#if NCOPY_WINDOWS > 0
    vm_offset_t         cpu_copywindow_base;
    uint64_t            *cpu_copywindow_pdp;

    vm_offset_t         cpu_physwindow_base;
    uint64_t            *cpu_physwindow_ptep;
    void                *cpu_hi_iss;
#endif



    volatile boolean_t  cpu_tlb_invalid;
    uint32_t            cpu_hwIntCnt[256];      /* Interrupt counts */
    uint64_t            cpu_dr7;                /* debug control register */
    uint64_t            cpu_int_event_time;     /* intr entry/exit time */
#if CONFIG_VMX
    vmx_cpu_t           cpu_vmx;                /* wonderful world of virtualization */
#endif
#if CONFIG_MCA
    struct mca_state    *cpu_mca_state;         /* State at MC fault */
#endif
    uint64_t            cpu_uber_arg_store;     /* Double mapped address
                                                 * of current thread's
                                                 * uu_arg array.
                                                 */
    uint64_t            cpu_uber_arg_store_valid; /* Double mapped
                                                   * address of pcb
                                                   * arg store
                                                   * validity flag.
                                                   */
    void                *cpu_nanotime;          /* Nanotime info */
    thread_t            csw_old_thread;
    thread_t            csw_new_thread;
    uint64_t            cpu_max_observed_int_latency;
    int                 cpu_max_observed_int_latency_vector;
    uint64_t            debugger_entry_time;
    volatile boolean_t  cpu_NMI_acknowledged;
    /* A separate nested interrupt stack flag, to account
     * for non-nested interrupts arriving while on the interrupt stack
     * Currently only occurs when AICPM enables interrupts on the
     * interrupt stack during processor offlining.
     */
    uint32_t            cpu_nested_istack;
    uint32_t            cpu_nested_istack_events;
} cpu_data_t;
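
/*
 * Usage sketch (illustrative only; cpu_datap() and the cpu_data_ptr[]
 * array it indexes are assumed here and are defined elsewhere in the
 * kernel, not in this file):
 *
 *      cpu_data_t *cdp   = current_cpu_datap();  // this CPU, via %gs
 *      int         me    = cdp->cpu_number;      // same as get_cpu_number()
 *      cpu_data_t *other = cpu_datap(1);         // CPU 1, via cpu_data_ptr[]
 */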

/* Macro to generate inline bodies to retrieve per-cpu data fields. */
#ifndef offsetof
#define offsetof(TYPE,MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
#endif /* offsetof */
#define CPU_DATA_GET(member,type)                       \
    type ret;                                           \
    __asm__ volatile ("mov %%gs:%P1,%0"                 \
        : "=r" (ret)                                    \
        : "i" (offsetof(cpu_data_t,member)));           \
    return ret;
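
/*
 * For example, CPU_DATA_GET(cpu_number,int) expands to roughly:
 *
 *      int ret;
 *      __asm__ volatile ("mov %%gs:%P1,%0"
 *          : "=r" (ret)
 *          : "i" (offsetof(cpu_data_t,cpu_number)));
 *      return ret;
 *
 * i.e. a single %gs-relative mov from the per-cpu area, with the member
 * offset folded in as an immediate at compile time.
 */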

/*
 * Everyone within the osfmk part of the kernel can use the fast
 * inline versions of these routines. Everyone outside must call
 * the real thing.
 */
static inline thread_t
get_active_thread(void)
{
    CPU_DATA_GET(cpu_active_thread,thread_t)
}
#define current_thread_fast()   get_active_thread()
#define current_thread()        current_thread_fast()

#if defined(__i386__)
static inline boolean_t
get_is64bit(void)
{
    CPU_DATA_GET(cpu_is64bit, boolean_t)
}
#define cpu_mode_is64bit()      get_is64bit()
#elif defined(__x86_64__)
#define cpu_mode_is64bit()      TRUE
#endif

static inline int
get_preemption_level(void)
{
    CPU_DATA_GET(cpu_preemption_level,int)
}
static inline int
get_simple_lock_count(void)
{
    CPU_DATA_GET(cpu_simple_lock_count,int)
}
static inline int
get_interrupt_level(void)
{
    CPU_DATA_GET(cpu_interrupt_level,int)
}
static inline int
get_cpu_number(void)
{
    CPU_DATA_GET(cpu_number,int)
}
static inline int
get_cpu_phys_number(void)
{
    CPU_DATA_GET(cpu_phys_number,int)
}

static inline void
disable_preemption(void)
{
    __asm__ volatile ("incl %%gs:%P0"
        :
        : "i" (offsetof(cpu_data_t, cpu_preemption_level)));
}
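
/*
 * disable_preemption() bumps the per-cpu preemption level with a single
 * %gs-relative incl; no lock is needed because the field belongs to the
 * executing CPU. The (currently compiled-out) enable_preemption() below
 * decrements it and calls kernel_preempt_check() once the level drops back
 * to zero - the "jne 1f" skips the call while the count is still nonzero.
 */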
#if UNUSED
static inline void
enable_preemption(void)
{
    //assert(get_preemption_level() > 0);

    if (get_preemption_level() > 0) {
        __asm__ volatile ("decl %%gs:%P0\n\t"
                          "jne 1f\n\t"
                          "call _kernel_preempt_check\n\t"
                          "1:"
            : /* no outputs */
            : "i" (offsetof(cpu_data_t, cpu_preemption_level))
            : "eax", "ecx", "edx", "cc", "memory");
    }
}
#endif
static inline void
enable_preemption_no_check(void)
{
    //assert(get_preemption_level() > 0);

    if (get_preemption_level() > 0) {
        __asm__ volatile ("decl %%gs:%P0"
            : /* no outputs */
            : "i" (offsetof(cpu_data_t, cpu_preemption_level))
            : "cc", "memory");
    }
}

static inline void
mp_disable_preemption(void)
{
    disable_preemption();
}
#if UNUSED
static inline void
mp_enable_preemption(void)
{
    enable_preemption();
}
#endif
static inline void
mp_enable_preemption_no_check(void)
{
    enable_preemption_no_check();
}

static inline cpu_data_t *
current_cpu_datap(void)
{
    CPU_DATA_GET(cpu_this, cpu_data_t *);
}


#endif  /* I386_CPU_DATA */



Revision: 1119