1 | /*␊ |
2 | * Copyright (c) 2000-2008 Apple Inc. All rights reserved.␊ |
3 | *␊ |
4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@␊ |
5 | * ␊ |
6 | * This file contains Original Code and/or Modifications of Original Code␊ |
7 | * as defined in and that are subject to the Apple Public Source License␊ |
8 | * Version 2.0 (the 'License'). You may not use this file except in␊ |
9 | * compliance with the License. The rights granted to you under the License␊ |
10 | * may not be used to create, or enable the creation or redistribution of,␊ |
11 | * unlawful or unlicensed copies of an Apple operating system, or to␊ |
12 | * circumvent, violate, or enable the circumvention or violation of, any␊ |
13 | * terms of an Apple operating system software license agreement.␊ |
14 | * ␊ |
15 | * Please obtain a copy of the License at␊ |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file.␊ |
17 | * ␊ |
18 | * The Original Code and all software distributed under the License are␊ |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER␊ |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,␊ |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,␊ |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.␊ |
23 | * Please see the License for the specific language governing rights and␊ |
24 | * limitations under the License.␊ |
25 | * ␊ |
26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@␊ |
27 | */␊ |
28 | /*␊ |
29 | * @OSF_COPYRIGHT@␊ |
30 | * ␊ |
31 | */␊ |
32 | ␊ |
33 | #ifndef␉I386_CPU_DATA␊ |
34 | #define I386_CPU_DATA␊ |
35 | ␊ |
36 | #include <mach/i386/thread_status.h>␊ |
37 | #include <mach/i386/vm_param.h>␊ |
38 | ␊ |
39 | ␊ |
40 | /*␊ |
41 | * Data structures referenced (anonymously) from per-cpu data:␊ |
42 | */␊ |
43 | struct cpu_cons_buffer;␊ |
44 | struct cpu_desc_table;␊ |
45 | struct mca_state;␊ |
46 | ␊ |
47 | ␊ |
48 | ␊ |
#if defined(__i386__)

/*
 * Per-cpu descriptor-table index: pointers to this CPU's task-state
 * segment(s), descriptor tables and LDT.  cdi_gdt/cdi_idt use the
 * packed pseudo-descriptor layout (16-bit limit immediately followed
 * by the base) -- the operand format of lgdt/lidt -- hence the
 * packed attribute.
 */
typedef struct {
	struct i386_tss *cdi_ktss;		/* kernel TSS */
#if MACH_KDB
	struct i386_tss *cdi_dbtss;		/* debugger TSS */
#endif /* MACH_KDB */
	struct __attribute__((packed)) {
		uint16_t size;			/* table limit */
		struct fake_descriptor *ptr;	/* table base */
	} cdi_gdt, cdi_idt;
	struct fake_descriptor	*cdi_ldt;	/* local descriptor table */
	vm_offset_t				cdi_sstk;	/* presumably sysenter stack -- TODO confirm */
} cpu_desc_index_t;

/* How a task's address space is mapped relative to the kernel. */
typedef enum {
	TASK_MAP_32BIT,			/* 32-bit, compatibility mode */ 
	TASK_MAP_64BIT,			/* 64-bit, separate address space */ 
	TASK_MAP_64BIT_SHARED		/* 64-bit, kernel-shared addr space */
} task_map_t;

#elif defined(__x86_64__)


/*
 * 64-bit variant: same layout idea as the i386 version above, with
 * x86_64 TSS pointers.
 */
typedef struct {
	struct x86_64_tss		*cdi_ktss;	/* kernel TSS */
#if MACH_KDB
	struct x86_64_tss		*cdi_dbtss;	/* debugger TSS */
#endif /* MACH_KDB */
	struct __attribute__((packed)) {
		uint16_t size;		/* table limit */
		void *ptr;		/* table base */
	} cdi_gdt, cdi_idt;
	struct fake_descriptor	*cdi_ldt;	/* local descriptor table */
	vm_offset_t				cdi_sstk;	/* presumably sysenter stack -- TODO confirm */
} cpu_desc_index_t;

/* On x86_64 all 64-bit user threads share the kernel's address space. */
typedef enum {
	TASK_MAP_32BIT,			/* 32-bit user, compatibility mode */ 
	TASK_MAP_64BIT,			/* 64-bit user thread, shared space */ 
} task_map_t;

#else
#error Unsupported architecture
#endif
94 | ␊ |
95 | /*␊ |
96 | * This structure is used on entry into the (uber-)kernel on syscall from␊ |
97 | * a 64-bit user. It contains the address of the machine state save area␊ |
98 | * for the current thread and a temporary place to save the user's rsp␊ |
99 | * before loading this address into rsp.␊ |
100 | */␊ |
101 | typedef struct {␊ |
102 | ␉addr64_t␉cu_isf;␉␉/* thread->pcb->iss.isf */␊ |
103 | ␉uint64_t␉cu_tmp;␉␉/* temporary scratch */␉␊ |
104 | addr64_t␉cu_user_gs_base;␊ |
105 | } cpu_uber_t;␊ |
106 | ␊ |
107 | /*␊ |
108 | * Per-cpu data.␊ |
109 | *␊ |
110 | * Each processor has a per-cpu data area which is dereferenced through the␊ |
111 | * current_cpu_datap() macro. For speed, the %gs segment is based here, and␊ |
112 | * using this, inlines provides single-instruction access to frequently used␊ |
113 | * members - such as get_cpu_number()/cpu_number(), and get_active_thread()/␊ |
114 | * current_thread(). ␊ |
115 | * ␊ |
116 | * Cpu data owned by another processor can be accessed using the␊ |
117 | * cpu_datap(cpu_number) macro which uses the cpu_data_ptr[] array of per-cpu␊ |
118 | * pointers.␊ |
119 | */␊ |
typedef struct cpu_data
{
	struct cpu_data		*cpu_this;		/* pointer to myself */
	thread_t		cpu_active_thread;	/* thread running here (current_thread()) */
	void			*cpu_int_state;		/* interrupt state */
	vm_offset_t		cpu_active_stack;	/* kernel stack base */
	vm_offset_t		cpu_kernel_stack;	/* kernel stack top */
	vm_offset_t		cpu_int_stack_top;	/* top of interrupt stack */
	int			cpu_preemption_level;	/* >0 means preemption disabled */
	int			cpu_simple_lock_count;	/* simple locks held -- presumably */
	int			cpu_interrupt_level;	/* interrupt nesting depth -- presumably */
	int			cpu_number;		/* Logical CPU */
	int			cpu_phys_number;	/* Physical CPU */
	uint32_t		cpu_id;			/* Platform Expert */
	int			cpu_signals;		/* IPI events */
	int			cpu_prior_signals;	/* Last set of events,
							 * debugging
							 */
	int			cpu_mcount_off;		/* mcount recursion */
	uint32_t			cpu_pending_ast;	/* pending AST bits */
	int			cpu_type;		/* mach cpu_type_t value -- TODO confirm */
	int			cpu_subtype;		/* mach cpu_subtype_t -- TODO confirm */
	int			cpu_threadtype;		/* mach cpu_threadtype_t -- TODO confirm */
	int			cpu_running;		/* nonzero while this cpu is up -- presumably */
	uint32_t		rtclock_timer;		/* NOTE(review): opaque; rtclock deadline state? */
	boolean_t		cpu_is64bit;		/* cpu in 64-bit mode (see cpu_mode_is64bit()) */
	task_map_t		cpu_task_map;		/* address-space map of current task */
	volatile addr64_t	cpu_task_cr3;		/* user task's %cr3 -- presumably */
	volatile addr64_t	cpu_active_cr3;		/* currently loaded %cr3 -- presumably */
	addr64_t		cpu_kernel_cr3;		/* kernel's %cr3 */
	cpu_uber_t		cpu_uber;		/* 64-bit syscall-entry state (cpu_uber_t) */
	void			*cpu_chud;		/* CHUD tool state (opaque) */
	void			*cpu_console_buf;	/* presumably a struct cpu_cons_buffer */
	uint32_t		lcpu;			/* NOTE(review): opaque logical-cpu field */
	struct processor	*cpu_processor;		/* scheduler's processor structure */
#if NCOPY_WINDOWS > 0
	struct cpu_pmap		*cpu_pmap;		/* per-cpu pmap for copy windows -- presumably */
#endif
	struct cpu_desc_table	*cpu_desc_tablep;	/* per-cpu descriptor-table storage */
	struct fake_descriptor	*cpu_ldtp;		/* per-cpu LDT */
	cpu_desc_index_t	cpu_desc_index;		/* pointers into the descriptor tables */
	int			cpu_ldt;		/* NOTE(review): LDT selector/flag? confirm */
#ifdef MACH_KDB
	/* XXX Untested: */
	int			cpu_db_pass_thru;
	vm_offset_t		cpu_db_stacks;
	void			*cpu_kdb_saved_state;
	spl_t			cpu_kdb_saved_ipl;
	int			cpu_kdb_is_slave;
	int			cpu_kdb_active;
#endif /* MACH_KDB */
	boolean_t		cpu_iflag;		/* saved interrupt-enable flag -- presumably EFLAGS.IF */
	boolean_t		cpu_boot_complete;	/* set when this cpu has booted -- presumably */
	int			cpu_hibernate;		/* hibernation state/flag -- presumably */

#if NCOPY_WINDOWS > 0
	vm_offset_t		cpu_copywindow_base;	/* base VA of this cpu's copy windows */
	uint64_t		*cpu_copywindow_pdp;	/* page-directory-pointer entry backing them -- presumably */

	vm_offset_t		cpu_physwindow_base;	/* base VA of the physical-access window */
	uint64_t		*cpu_physwindow_ptep;	/* PTE backing the physical window -- presumably */
	void 			*cpu_hi_iss;		/* NOTE(review): opaque (high-mapped saved state?) */
#endif



	volatile boolean_t	cpu_tlb_invalid;	/* request TLB flush on this cpu -- TODO confirm flush point */
	uint32_t		cpu_hwIntCnt[256];	/* Interrupt counts */
	uint64_t		cpu_dr7; /* debug control register */
	uint64_t		cpu_int_event_time;	/* intr entry/exit time */
#if CONFIG_VMX
	vmx_cpu_t		cpu_vmx;		/* wonderful world of virtualization */
#endif
#if CONFIG_MCA
	struct mca_state	*cpu_mca_state;		/* State at MC fault */
#endif
	uint64_t		cpu_uber_arg_store;	/* Double mapped address
							 * of current thread's
							 * uu_arg array.
							 */
	uint64_t		cpu_uber_arg_store_valid; /* Double mapped
							 * address of pcb
							 * arg store
							 * validity flag.
							 */
	void		*cpu_nanotime;		/* Nanotime info */
	thread_t		csw_old_thread;		/* outgoing thread at context switch -- presumably */
	thread_t		csw_new_thread;		/* incoming thread at context switch -- presumably */
	uint64_t		cpu_max_observed_int_latency;		/* worst observed interrupt latency */
	int			cpu_max_observed_int_latency_vector;	/* vector of that interrupt */
	uint64_t		debugger_entry_time;	/* timestamp at debugger entry */
	volatile boolean_t	cpu_NMI_acknowledged;	/* NMI acknowledged on this cpu */
	/* A separate nested interrupt stack flag, to account
	 * for non-nested interrupts arriving while on the interrupt stack
	 * Currently only occurs when AICPM enables interrupts on the
	 * interrupt stack during processor offlining.
	 */
	uint32_t		cpu_nested_istack;	/* currently on nested interrupt stack */
	uint32_t		cpu_nested_istack_events;	/* count of such occurrences */
} cpu_data_t;
220 | ␊ |
/* Macro to generate inline bodies to retrieve per-cpu data fields. */
#ifndef offsetof
#define offsetof(TYPE,MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
#endif /* offsetof */
/*
 * CPU_DATA_GET expands to a complete function body: it declares 'ret',
 * loads the named cpu_data_t member %gs-relative (the %gs base is this
 * CPU's cpu_data area), and returns it.  The member offset must be a
 * compile-time constant ("i" constraint); the %P operand modifier
 * prints it without the '$' immediate prefix so it can be used as a
 * displacement.  Only usable inside a function returning 'type'.
 */
#define CPU_DATA_GET(member,type)					\
	type ret;							\
	__asm__ volatile ("mov %%gs:%P1,%0"				\
		: "=r" (ret)						\
		: "i" (offsetof(cpu_data_t,member)));			\
	return ret;
231 | ␊ |
232 | /*␊ |
233 | * Everyone within the osfmk part of the kernel can use the fast␊ |
 * inline versions of these routines. Everyone outside must call
 * the real thing.
236 | */␊ |
/*
 * Fast fetch of the thread running on this CPU, read %gs-relative
 * from per-cpu data; exported to the rest of osfmk as current_thread().
 */
static inline thread_t
get_active_thread(void)
{
	CPU_DATA_GET(cpu_active_thread,thread_t)
}
#define current_thread_fast()		get_active_thread()
#define current_thread()		current_thread_fast()
244 | ␊ |
#if defined(__i386__)
/* Whether this CPU is in 64-bit mode: dynamic on a 32-bit kernel... */
static inline boolean_t
get_is64bit(void)
{
	CPU_DATA_GET(cpu_is64bit, boolean_t)
}
#define cpu_mode_is64bit()		get_is64bit()
#elif defined(__x86_64__)
/* ...and trivially TRUE on a 64-bit kernel. */
#define cpu_mode_is64bit()		TRUE
#endif
255 | ␊ |
/* Current CPU's preemption-disable nesting level (0 = preemptible). */
static inline int
get_preemption_level(void)
{
	CPU_DATA_GET(cpu_preemption_level,int)
}
/* Current CPU's simple-lock count (semantics per cpu_simple_lock_count). */
static inline int
get_simple_lock_count(void)
{
	CPU_DATA_GET(cpu_simple_lock_count,int)
}
/* Current CPU's interrupt nesting level. */
static inline int
get_interrupt_level(void)
{
	CPU_DATA_GET(cpu_interrupt_level,int)
}
/* Logical number of the CPU we are executing on. */
static inline int
get_cpu_number(void)
{
	CPU_DATA_GET(cpu_number,int)
}
/* Physical (hardware) number of the CPU we are executing on. */
static inline int
get_cpu_phys_number(void)
{
	CPU_DATA_GET(cpu_phys_number,int)
}
281 | ␊ |
/*
 * Raise this CPU's preemption level with a single %gs-relative
 * increment.  Must be balanced by an enable_preemption*() call.
 */
static inline void
disable_preemption(void)
{
	__asm__ volatile ("incl %%gs:%P0"
			:
			: "i" (offsetof(cpu_data_t, cpu_preemption_level)));
}
#if UNUSED
/*
 * Drop the preemption level and, when it reaches zero (decl sets ZF,
 * so the jne skips the call otherwise), check for a pending preemption
 * point via _kernel_preempt_check.
 * NOTE(review): compiled out (#if UNUSED).  The level>0 test guards
 * against underflow in place of the commented-out assert.
 */
static inline void
enable_preemption(void)
{
	//assert(get_preemption_level() > 0);
	
	if (get_preemption_level() > 0){
		__asm__ volatile ("decl %%gs:%P0		\n\t"
						 "jne 1f			\n\t"
						 "call _kernel_preempt_check	\n\t"
						 "1:"
						 : /* no outputs */
						 : "i" (offsetof(cpu_data_t, cpu_preemption_level))
						 : "eax", "ecx", "edx", "cc", "memory");
		}
}
#endif
/*
 * Drop the preemption level WITHOUT checking for a pending preemption
 * point.  The level>0 test guards against underflow in place of the
 * commented-out assert.
 */
static inline void
enable_preemption_no_check(void)
{
	//assert(get_preemption_level() > 0);
	
	if (get_preemption_level() > 0){
		__asm__ volatile ("decl %%gs:%P0"
						 : /* no outputs */
						 : "i" (offsetof(cpu_data_t, cpu_preemption_level))
						 : "cc", "memory");
	}
}
318 | ␊ |
/* Historical MP variant; simply forwards to disable_preemption(). */
static inline void
mp_disable_preemption(void)
{
	disable_preemption();
}
#if UNUSED
/* Historical MP variant; simply forwards to enable_preemption().
 * Compiled out together with enable_preemption() (#if UNUSED). */
static inline void
mp_enable_preemption(void)
{
	enable_preemption();
}
#endif
/* Historical MP variant; simply forwards to enable_preemption_no_check(). */
static inline void
mp_enable_preemption_no_check(void)
{
	enable_preemption_no_check();
}
336 | ␊ |
337 | static inline cpu_data_t *␊ |
338 | current_cpu_datap(void)␊ |
339 | {␊ |
340 | ␉CPU_DATA_GET(cpu_this, cpu_data_t *);␊ |
341 | }␊ |
342 | ␊ |
343 | ␊ |
344 | #endif␉/* I386_CPU_DATA */␊ |
345 | |