Chameleon

Chameleon Svn Source Tree

Root/branches/cparm/i386/libsaio/cpu_data.h

/*
 * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 *
 */

#ifndef I386_CPU_DATA
#define I386_CPU_DATA

#include <mach/i386/thread_status.h>
#include <mach/i386/vm_param.h>

/* Extracted from pal_routine */
struct pal_cpu_data;                    /* Defined per-platform */

struct pal_cpu_data {

};

/*
 * Data structures referenced (anonymously) from per-cpu data:
 */
struct cpu_cons_buffer;
struct cpu_desc_table;
struct mca_state;

#if defined(__i386__)

typedef struct {
    struct i386_tss        *cdi_ktss;
#if MACH_KDB
    struct i386_tss        *cdi_dbtss;
#endif /* MACH_KDB */
    struct __attribute__((packed)) {
        uint16_t                size;
        struct fake_descriptor *ptr;
    } cdi_gdt, cdi_idt;
    struct fake_descriptor *cdi_ldt;
    vm_offset_t             cdi_sstk;
} cpu_desc_index_t;

typedef enum {
    TASK_MAP_32BIT,         /* 32-bit, compatibility mode */
    TASK_MAP_64BIT,         /* 64-bit, separate address space */
    TASK_MAP_64BIT_SHARED   /* 64-bit, kernel-shared addr space */
} task_map_t;

#elif defined(__x86_64__)

typedef struct {
    struct x86_64_tss      *cdi_ktss;
#if MACH_KDB
    struct x86_64_tss      *cdi_dbtss;
#endif /* MACH_KDB */
    struct __attribute__((packed)) {
        uint16_t    size;
        void       *ptr;
    } cdi_gdt, cdi_idt;
    struct fake_descriptor *cdi_ldt;
    vm_offset_t             cdi_sstk;
} cpu_desc_index_t;

typedef enum {
    TASK_MAP_32BIT,         /* 32-bit user, compatibility mode */
    TASK_MAP_64BIT,         /* 64-bit user thread, shared space */
} task_map_t;

#else
#error Unsupported architecture
#endif

/*
 * This structure is used on entry into the (uber-)kernel on syscall from
 * a 64-bit user. It contains the address of the machine state save area
 * for the current thread and a temporary place to save the user's rsp
 * before loading this address into rsp.
 */
typedef struct {
    addr64_t    cu_isf;             /* thread->pcb->iss.isf */
    uint64_t    cu_tmp;             /* temporary scratch */
    addr64_t    cu_user_gs_base;
} cpu_uber_t;
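
/*
 * Illustrative sketch, added for clarity -- not part of the original file.
 * On syscall entry from a 64-bit user, with %gs based at the per-cpu area,
 * the handler can spill the user stack pointer and switch to the thread's
 * save area in two %gs-relative moves. CPU_UBER_TMP and CPU_UBER_ISF are
 * assumed assembler offsets for cpu_uber.cu_tmp and cpu_uber.cu_isf:
 *
 *      mov %rsp, %gs:CPU_UBER_TMP      // cu_tmp = user rsp
 *      mov %gs:CPU_UBER_ISF, %rsp      // rsp = cu_isf (save area)
 */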
/*
 * Per-cpu data.
 *
 * Each processor has a per-cpu data area which is dereferenced through the
 * current_cpu_datap() macro. For speed, the %gs segment is based here, and
 * using this, inlines provide single-instruction access to frequently used
 * members - such as get_cpu_number()/cpu_number(), and get_active_thread()/
 * current_thread().
 *
 * Cpu data owned by another processor can be accessed using the
 * cpu_datap(cpu_number) macro which uses the cpu_data_ptr[] array of per-cpu
 * pointers.
 */
typedef struct cpu_data
{
    struct pal_cpu_data     cpu_pal_data;       /* PAL-specific data */
#define cpu_pd cpu_pal_data                     /* convenience alias */
    struct cpu_data        *cpu_this;           /* pointer to myself */
    thread_t                cpu_active_thread;
    int                     cpu_preemption_level;
    int                     cpu_number;         /* Logical CPU */
    void                   *cpu_int_state;      /* interrupt state */
    vm_offset_t             cpu_active_stack;   /* kernel stack base */
    vm_offset_t             cpu_kernel_stack;   /* kernel stack top */
    vm_offset_t             cpu_int_stack_top;
    int                     cpu_interrupt_level;
    int                     cpu_phys_number;    /* Physical CPU */
    uint32_t                cpu_id;             /* Platform Expert */
    int                     cpu_signals;        /* IPI events */
    int                     cpu_prior_signals;  /* Last set of events,
                                                 * debugging
                                                 */
    int                     cpu_mcount_off;     /* mcount recursion */
    uint32_t                cpu_pending_ast;
    int                     cpu_type;
    int                     cpu_subtype;
    int                     cpu_threadtype;
    int                     cpu_running;
    uint32_t                rtclock_timer;
    boolean_t               cpu_is64bit;
    volatile addr64_t       cpu_active_cr3 __attribute((aligned(64)));
    union {
        volatile uint32_t cpu_tlb_invalid;
        struct {
            volatile uint16_t cpu_tlb_invalid_local;
            volatile uint16_t cpu_tlb_invalid_global;
        };
    };
    volatile task_map_t     cpu_task_map;
    volatile addr64_t       cpu_task_cr3;
    addr64_t                cpu_kernel_cr3;
    cpu_uber_t              cpu_uber;
    void                   *cpu_chud;
    void                   *cpu_console_buf;
    uint32_t                lcpu;
    struct processor       *cpu_processor;
#if NCOPY_WINDOWS > 0
    struct cpu_pmap        *cpu_pmap;
#endif
    struct cpu_desc_table  *cpu_desc_tablep;
    struct fake_descriptor *cpu_ldtp;
    cpu_desc_index_t        cpu_desc_index;
    int                     cpu_ldt;
#ifdef MACH_KDB
    /* XXX Untested: */
    int                     cpu_db_pass_thru;
    vm_offset_t             cpu_db_stacks;
    void                   *cpu_kdb_saved_state;
    spl_t                   cpu_kdb_saved_ipl;
    int                     cpu_kdb_is_slave;
    int                     cpu_kdb_active;
#endif /* MACH_KDB */
    boolean_t               cpu_iflag;
    boolean_t               cpu_boot_complete;
    int                     cpu_hibernate;
#if NCOPY_WINDOWS > 0
    vm_offset_t             cpu_copywindow_base;
    uint64_t               *cpu_copywindow_pdp;

    vm_offset_t             cpu_physwindow_base;
    uint64_t               *cpu_physwindow_ptep;
#endif
    void                   *cpu_hi_iss;

#define HWINTCNT_SIZE 256
    uint32_t                cpu_hwIntCnt[HWINTCNT_SIZE]; /* Interrupt counts */
    uint64_t                cpu_dr7;            /* debug control register */
    uint64_t                cpu_int_event_time; /* intr entry/exit time */
#if CONFIG_VMX
    vmx_cpu_t               cpu_vmx;            /* wonderful world of virtualization */
#endif
#if CONFIG_MCA
    struct mca_state       *cpu_mca_state;      /* State at MC fault */
#endif
    uint64_t                cpu_uber_arg_store; /* Double mapped address
                                                 * of current thread's
                                                 * uu_arg array.
                                                 */
    uint64_t                cpu_uber_arg_store_valid; /* Double mapped
                                                       * address of pcb
                                                       * arg store
                                                       * validity flag.
                                                       */
    void                   *cpu_nanotime;       /* Nanotime info */
    thread_t                csw_old_thread;
    thread_t                csw_new_thread;
#if defined(__x86_64__)
    uint32_t                cpu_pmap_pcid_enabled;
    pcid_t                  cpu_active_pcid;
    pcid_t                  cpu_last_pcid;
    volatile pcid_ref_t    *cpu_pmap_pcid_coherentp;
    volatile pcid_ref_t    *cpu_pmap_pcid_coherentp_kernel;
#define PMAP_PCID_MAX_PCID  (0x1000)
    pcid_t                  cpu_pcid_free_hint;
    pcid_ref_t              cpu_pcid_refcounts[PMAP_PCID_MAX_PCID];
    pmap_t                  cpu_pcid_last_pmap_dispatched[PMAP_PCID_MAX_PCID];
#ifdef PCID_STATS
    uint64_t                cpu_pmap_pcid_flushes;
    uint64_t                cpu_pmap_pcid_preserves;
#endif
#endif /* x86_64 */
    uint64_t                cpu_max_observed_int_latency;
    int                     cpu_max_observed_int_latency_vector;
    uint64_t                debugger_entry_time;
    volatile boolean_t      cpu_NMI_acknowledged;
    /* A separate nested interrupt stack flag, to account
     * for non-nested interrupts arriving while on the interrupt stack.
     * Currently only occurs when AICPM enables interrupts on the
     * interrupt stack during processor offlining.
     */
    uint32_t                cpu_nested_istack;
    uint32_t                cpu_nested_istack_events;
    void                   *cpu_fatal_trap_state;
    void                   *cpu_post_fatal_trap_state;
} cpu_data_t;

/* Macro to generate inline bodies to retrieve per-cpu data fields. */
#ifndef offsetof
#define offsetof(TYPE,MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
#endif /* offsetof */
#define CPU_DATA_GET(member,type)               \
    type ret;                                   \
    __asm__ volatile ("mov %%gs:%P1,%0"         \
        : "=r" (ret)                            \
        : "i" (offsetof(cpu_data_t,member)));   \
    return ret;
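
/*
 * Illustrative expansion, added for clarity -- not part of the original
 * file. CPU_DATA_GET(cpu_number,int) emits a single %gs-relative load,
 * roughly:
 *
 *      int ret;
 *      __asm__ volatile ("mov %%gs:%P1,%0"
 *          : "=r" (ret)
 *          : "i" (offsetof(cpu_data_t, cpu_number)));
 *      return ret;
 *
 * i.e. one "mov %gs:OFFSET, %reg" instruction, with the field offset
 * folded in as an immediate.
 */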

/*
 * Everyone within the osfmk part of the kernel can use the fast
 * inline versions of these routines. Everyone outside must call
 * the real thing.
 */
static inline thread_t
get_active_thread(void)
{
    CPU_DATA_GET(cpu_active_thread, thread_t)
}
#define current_thread_fast()   get_active_thread()
#define current_thread()        current_thread_fast()
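
/*
 * Usage note, added for clarity -- not part of the original file: within
 * this code, current_thread() therefore compiles down to the single
 * %gs-relative load above, e.g.
 *
 *      thread_t t = current_thread();  // one mov from %gs:cpu_active_thread
 */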

#if defined(__i386__)
static inline boolean_t
get_is64bit(void)
{
    CPU_DATA_GET(cpu_is64bit, boolean_t)
}
#define cpu_mode_is64bit()      get_is64bit()
#elif defined(__x86_64__)
#define cpu_mode_is64bit()      TRUE
#endif

static inline int
get_preemption_level(void)
{
    CPU_DATA_GET(cpu_preemption_level, int)
}
static inline int
get_interrupt_level(void)
{
    CPU_DATA_GET(cpu_interrupt_level, int)
}
static inline int
get_cpu_number(void)
{
    CPU_DATA_GET(cpu_number, int)
}
static inline int
get_cpu_phys_number(void)
{
    CPU_DATA_GET(cpu_phys_number, int)
}

static inline void
disable_preemption(void)
{
    __asm__ volatile ("incl %%gs:%P0"
        :
        : "i" (offsetof(cpu_data_t, cpu_preemption_level)));
}
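
/*
 * Note, added for clarity -- not part of the original file: the increment
 * is a single "incl %gs:OFFSET" instruction, so an interrupt taken on this
 * CPU cannot land in the middle of the read-modify-write of
 * cpu_preemption_level.
 */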
#if UNUSED
static inline void
enable_preemption(void)
{
    //assert(get_preemption_level() > 0);

    if (get_preemption_level() > 0) {
        __asm__ volatile ("decl %%gs:%P0\n\t"
                          "jne 1f\n\t"
                          "call _kernel_preempt_check\n\t"
                          "1:"
            : /* no outputs */
            : "i" (offsetof(cpu_data_t, cpu_preemption_level))
            : "eax", "ecx", "edx", "cc", "memory");
    }
}
#endif
static inline void
enable_preemption_no_check(void)
{
    //assert(get_preemption_level() > 0);

    if (get_preemption_level() > 0) {
        __asm__ volatile ("decl %%gs:%P0"
            : /* no outputs */
            : "i" (offsetof(cpu_data_t, cpu_preemption_level))
            : "cc", "memory");
    }
}
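
/*
 * Note, added for clarity -- not part of the original file: the _no_check
 * variant only decrements the count; unlike enable_preemption() above, it
 * never calls kernel_preempt_check(), so a preemption that became pending
 * while the level was raised is not acted on here.
 */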

static inline void
mp_disable_preemption(void)
{
    disable_preemption();
}
#if UNUSED
static inline void
mp_enable_preemption(void)
{
    enable_preemption();
}
#endif
static inline void
mp_enable_preemption_no_check(void)
{
    enable_preemption_no_check();
}

static inline cpu_data_t *
current_cpu_datap(void)
{
    CPU_DATA_GET(cpu_this, cpu_data_t *);
}
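
/*
 * Usage note, added for clarity -- not part of the original file: the local
 * per-cpu area is then reached through the returned pointer, e.g.
 *
 *      if (current_cpu_datap()->cpu_running) { ... }
 */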

#endif /* I386_CPU_DATA */


Revision: 1468