1 | /*␊ |
2 | * Copyright (c) 2000-2008 Apple Inc. All rights reserved.␊ |
3 | *␊ |
4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@␊ |
5 | * ␊ |
6 | * This file contains Original Code and/or Modifications of Original Code␊ |
7 | * as defined in and that are subject to the Apple Public Source License␊ |
8 | * Version 2.0 (the 'License'). You may not use this file except in␊ |
9 | * compliance with the License. The rights granted to you under the License␊ |
10 | * may not be used to create, or enable the creation or redistribution of,␊ |
11 | * unlawful or unlicensed copies of an Apple operating system, or to␊ |
12 | * circumvent, violate, or enable the circumvention or violation of, any␊ |
13 | * terms of an Apple operating system software license agreement.␊ |
14 | * ␊ |
15 | * Please obtain a copy of the License at␊ |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file.␊ |
17 | * ␊ |
18 | * The Original Code and all software distributed under the License are␊ |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER␊ |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,␊ |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,␊ |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.␊ |
23 | * Please see the License for the specific language governing rights and␊ |
24 | * limitations under the License.␊ |
25 | * ␊ |
26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@␊ |
27 | */␊ |
28 | /*␊ |
29 | * @OSF_COPYRIGHT@␊ |
30 | * ␊ |
31 | */␊ |
32 | ␊ |
33 | #ifndef␉I386_CPU_DATA␊ |
34 | #define I386_CPU_DATA␊ |
35 | ␊ |
36 | #include <mach/i386/thread_status.h>␊ |
37 | #include <mach/i386/vm_param.h>␊ |
38 | ␊ |
39 | /* Extracted from pal_routine */␊ |
40 | struct pal_cpu_data; /* Defined per-platform */␊ |
41 | ␊ |
/*
 * Placeholder PAL (platform abstraction layer) per-cpu data, embedded at
 * the head of cpu_data below.  Left empty in this extracted header; note
 * that an empty struct body is a GNU C extension (sizeof == 0), so this
 * compiles only with GCC-compatible compilers.
 */
struct pal_cpu_data {

};
45 | ␊ |
46 | /*␊ |
47 | * Data structures referenced (anonymously) from per-cpu data:␊ |
48 | */␊ |
49 | struct cpu_cons_buffer;␊ |
50 | struct cpu_desc_table;␊ |
51 | struct mca_state;␊ |
52 | ␊ |
53 | ␊ |
54 | ␊ |
55 | #if defined(__i386__)␊ |
56 | ␊ |
/*
 * Per-cpu pointers into this processor's descriptor tables (TSS, GDT,
 * IDT, LDT), cached for fast access (i386 variant).
 */
typedef struct {
	struct i386_tss *cdi_ktss;	/* kernel TSS */
#if MACH_KDB
	struct i386_tss *cdi_dbtss;	/* debugger TSS */
#endif /* MACH_KDB */
	/*
	 * 16-bit limit followed immediately by the table pointer; packed
	 * so the pair matches the pseudo-descriptor layout consumed by
	 * lgdt/lidt (no padding between the members).
	 */
	struct __attribute__((packed)) {
		uint16_t size;
		struct fake_descriptor *ptr;
	} cdi_gdt, cdi_idt;
	struct fake_descriptor	*cdi_ldt;
	vm_offset_t			cdi_sstk;	/* presumably the sysenter stack -- verify */
} cpu_desc_index_t;
69 | ␊ |
/*
 * How a task's user address space relates to the kernel's (i386
 * variant; the x86_64 build below has no 64BIT_SHARED member).
 */
typedef enum {
	TASK_MAP_32BIT,			/* 32-bit, compatibility mode */
	TASK_MAP_64BIT,			/* 64-bit, separate address space */
	TASK_MAP_64BIT_SHARED		/* 64-bit, kernel-shared addr space */
} task_map_t;
75 | ␊ |
76 | #elif defined(__x86_64__)␊ |
77 | ␊ |
78 | ␊ |
/*
 * Per-cpu pointers into this processor's descriptor tables (TSS, GDT,
 * IDT, LDT), cached for fast access (x86_64 variant).
 */
typedef struct {
	struct x86_64_tss		*cdi_ktss;	/* kernel TSS */
#if MACH_KDB
	struct x86_64_tss		*cdi_dbtss;	/* debugger TSS */
#endif /* MACH_KDB */
	/*
	 * 16-bit limit followed immediately by the table pointer; packed
	 * so the pair matches the pseudo-descriptor layout consumed by
	 * lgdt/lidt (no padding between the members).
	 */
	struct __attribute__((packed)) {
		uint16_t size;
		void *ptr;
	} cdi_gdt, cdi_idt;
	struct fake_descriptor	*cdi_ldt;
	vm_offset_t			cdi_sstk;	/* presumably the sysenter stack -- verify */
} cpu_desc_index_t;
91 | ␊ |
/*
 * How a task's user address space relates to the kernel's (x86_64
 * variant: all 64-bit tasks share the kernel address space).
 */
typedef enum {
	TASK_MAP_32BIT,			/* 32-bit user, compatibility mode */
	TASK_MAP_64BIT,			/* 64-bit user thread, shared space */
} task_map_t;
96 | ␊ |
97 | #else␊ |
98 | #error Unsupported architecture␊ |
99 | #endif␊ |
100 | ␊ |
101 | /*␊ |
102 | * This structure is used on entry into the (uber-)kernel on syscall from␊ |
103 | * a 64-bit user. It contains the address of the machine state save area␊ |
104 | * for the current thread and a temporary place to save the user's rsp␊ |
105 | * before loading this address into rsp.␊ |
106 | */␊ |
typedef struct {
	addr64_t	cu_isf;		/* thread->pcb->iss.isf */
	uint64_t	cu_tmp;		/* temporary scratch */
	addr64_t	cu_user_gs_base;	/* user's GS base, saved across kernel entry -- TODO confirm */
} cpu_uber_t;
112 | ␊ |
113 | /*␊ |
114 | * Per-cpu data.␊ |
115 | *␊ |
116 | * Each processor has a per-cpu data area which is dereferenced through the␊ |
117 | * current_cpu_datap() macro. For speed, the %gs segment is based here, and␊ |
 * using this, inlines provide single-instruction access to frequently used
119 | * members - such as get_cpu_number()/cpu_number(), and get_active_thread()/␊ |
120 | * current_thread(). ␊ |
121 | * ␊ |
122 | * Cpu data owned by another processor can be accessed using the␊ |
123 | * cpu_datap(cpu_number) macro which uses the cpu_data_ptr[] array of per-cpu␊ |
124 | * pointers.␊ |
125 | */␊ |
/*
 * The per-cpu data area proper.  The hot fields near the top (cpu_this,
 * cpu_active_thread, cpu_preemption_level, ...) are read with single
 * %gs-relative loads by the CPU_DATA_GET() accessors defined below.
 * Field order is ABI for those accessors: do not reorder members.
 */
typedef struct cpu_data
{
	struct pal_cpu_data	cpu_pal_data;		/* PAL-specific data */
#define				cpu_pd cpu_pal_data	/* convenience alias */
	struct cpu_data		*cpu_this;		/* pointer to myself */
	thread_t		cpu_active_thread;	/* thread running here; read by current_thread() */
	int			cpu_preemption_level;	/* preemption-disable nesting count */
	int			cpu_number;		/* Logical CPU */
	void			*cpu_int_state;		/* interrupt state */
	vm_offset_t		cpu_active_stack;	/* kernel stack base */
	vm_offset_t		cpu_kernel_stack;	/* kernel stack top */
	vm_offset_t		cpu_int_stack_top;	/* top of interrupt stack -- presumed; verify */
	int			cpu_interrupt_level;	/* interrupt nesting; see get_interrupt_level() */
	int			cpu_phys_number;	/* Physical CPU */
	uint32_t		cpu_id;			/* Platform Expert */
	int			cpu_signals;		/* IPI events */
	int			cpu_prior_signals;	/* Last set of events,
							 * debugging
							 */
	int			cpu_mcount_off;		/* mcount recursion */
	uint32_t		cpu_pending_ast;	/* pending AST bits -- TODO confirm encoding */
	int			cpu_type;
	int			cpu_subtype;
	int			cpu_threadtype;
	int			cpu_running;
	uint32_t		rtclock_timer;
	boolean_t		cpu_is64bit;		/* see get_is64bit()/cpu_mode_is64bit() */
	/* 64-byte aligned so the active cr3 sits on its own cache line. */
	volatile addr64_t	cpu_active_cr3 __attribute((aligned(64)));
	union {
		volatile uint32_t cpu_tlb_invalid;	/* both 16-bit flags at once */
		struct {
			volatile uint16_t cpu_tlb_invalid_local;
			volatile uint16_t cpu_tlb_invalid_global;
		};
	};
	volatile task_map_t	cpu_task_map;		/* map type of the current task */
	volatile addr64_t	cpu_task_cr3;
	addr64_t		cpu_kernel_cr3;
	cpu_uber_t		cpu_uber;		/* 64-bit syscall entry state (see cpu_uber_t) */
	void			*cpu_chud;
	void			*cpu_console_buf;
	uint32_t		lcpu;
	struct processor	*cpu_processor;
#if NCOPY_WINDOWS > 0
	struct cpu_pmap		*cpu_pmap;
#endif
	struct cpu_desc_table	*cpu_desc_tablep;
	struct fake_descriptor	*cpu_ldtp;
	cpu_desc_index_t	cpu_desc_index;
	int			cpu_ldt;
/* NOTE(review): the cpu_desc_index_t blocks above use "#if MACH_KDB"
 * while this uses "#ifdef" -- these differ if MACH_KDB is defined as 0;
 * confirm which is intended. */
#ifdef MACH_KDB
	/* XXX Untested: */
	int			cpu_db_pass_thru;
	vm_offset_t		cpu_db_stacks;
	void			*cpu_kdb_saved_state;
	spl_t			cpu_kdb_saved_ipl;
	int			cpu_kdb_is_slave;
	int			cpu_kdb_active;
#endif /* MACH_KDB */
	boolean_t		cpu_iflag;
	boolean_t		cpu_boot_complete;
	int			cpu_hibernate;
#if NCOPY_WINDOWS > 0
	/* Per-cpu copy/physical mapping windows. */
	vm_offset_t		cpu_copywindow_base;
	uint64_t		*cpu_copywindow_pdp;

	vm_offset_t		cpu_physwindow_base;
	uint64_t		*cpu_physwindow_ptep;
#endif
	void 			*cpu_hi_iss;

#define HWINTCNT_SIZE 256
	uint32_t		cpu_hwIntCnt[HWINTCNT_SIZE];	/* Interrupt counts */
	uint64_t		cpu_dr7; /* debug control register */
	uint64_t		cpu_int_event_time;	/* intr entry/exit time */
#if CONFIG_VMX
	vmx_cpu_t		cpu_vmx;		/* wonderful world of virtualization */
#endif
#if CONFIG_MCA
	struct mca_state	*cpu_mca_state;		/* State at MC fault */
#endif
	uint64_t		cpu_uber_arg_store;	/* Double mapped address
							 * of current thread's
							 * uu_arg array.
							 */
	uint64_t		cpu_uber_arg_store_valid; /* Double mapped
							 * address of pcb
							 * arg store
							 * validity flag.
							 */
	void	*cpu_nanotime;		/* Nanotime info */
	thread_t		csw_old_thread;		/* context-switch bookkeeping -- presumed; verify */
	thread_t		csw_new_thread;
#if	defined(__x86_64__)
	/* PCID (process-context identifier) state for tagged TLB support. */
	uint32_t		cpu_pmap_pcid_enabled;
	pcid_t			cpu_active_pcid;
	pcid_t			cpu_last_pcid;
	volatile pcid_ref_t	*cpu_pmap_pcid_coherentp;
	volatile pcid_ref_t	*cpu_pmap_pcid_coherentp_kernel;
#define	PMAP_PCID_MAX_PCID (0x1000)
	pcid_t			cpu_pcid_free_hint;
	pcid_ref_t		cpu_pcid_refcounts[PMAP_PCID_MAX_PCID];
	pmap_t			cpu_pcid_last_pmap_dispatched[PMAP_PCID_MAX_PCID];
#ifdef	PCID_STATS
	uint64_t		cpu_pmap_pcid_flushes;
	uint64_t		cpu_pmap_pcid_preserves;
#endif
#endif /* x86_64 */
	uint64_t cpu_max_observed_int_latency;		/* largest interrupt latency recorded */
	int cpu_max_observed_int_latency_vector;	/* vector that produced it */
	uint64_t		debugger_entry_time;
	volatile boolean_t	cpu_NMI_acknowledged;
	/* A separate nested interrupt stack flag, to account
	 * for non-nested interrupts arriving while on the interrupt stack
	 * Currently only occurs when AICPM enables interrupts on the
	 * interrupt stack during processor offlining.
	 */
	uint32_t		cpu_nested_istack;
	uint32_t		cpu_nested_istack_events;
	void	*cpu_fatal_trap_state;
	void	*cpu_post_fatal_trap_state;
} cpu_data_t;
248 | ␊ |
/* Fallback offsetof for freestanding builds without <stddef.h>. */
#ifndef offsetof
#define offsetof(TYPE,MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
#endif /* offsetof */
/*
 * Macro to generate inline bodies that retrieve per-cpu data fields:
 * declares a local of the requested type, fills it with a single
 * %gs-relative mov from the given cpu_data_t member, and returns it.
 * Because the expansion ends in "return ret;", the invocation must be
 * the last statement of the enclosing function.
 */
#define CPU_DATA_GET(member,type)					\
	type ret;							\
	__asm__ volatile ("mov %%gs:%P1,%0"				\
		: "=r" (ret)						\
		: "i" (offsetof(cpu_data_t,member)));			\
	return ret;
259 | ␊ |
260 | /*␊ |
261 | * Everyone within the osfmk part of the kernel can use the fast␊ |
262 | * inline versions of these routines. Everyone outside, must call␊ |
 * the real thing.
264 | */␊ |
/*
 * Return the thread active on this processor: a single %gs-relative
 * load of cpu_data.cpu_active_thread.  Backs current_thread().
 */
static inline thread_t
get_active_thread(void)
{
	CPU_DATA_GET(cpu_active_thread,thread_t)
}
270 | #define current_thread_fast()␉␉get_active_thread()␊ |
271 | #define current_thread()␉␉current_thread_fast()␊ |
272 | ␊ |
273 | #if defined(__i386__)␊ |
/*
 * i386 only: return this cpu's cpu_is64bit flag (64-bit capability/mode
 * indicator -- exact semantics set elsewhere; see cpu_mode_is64bit()).
 * On x86_64 builds cpu_mode_is64bit() is simply TRUE.
 */
static inline boolean_t
get_is64bit(void)
{
	CPU_DATA_GET(cpu_is64bit, boolean_t)
}
279 | #define cpu_mode_is64bit()␉␉get_is64bit()␊ |
280 | #elif defined(__x86_64__)␊ |
281 | #define cpu_mode_is64bit()␉␉TRUE␊ |
282 | #endif␊ |
283 | ␊ |
/*
 * Return this cpu's preemption-disable nesting count
 * (cpu_data.cpu_preemption_level); 0 means preemptible.
 */
static inline int
get_preemption_level(void)
{
	CPU_DATA_GET(cpu_preemption_level,int)
}
/*
 * Return this cpu's interrupt nesting level
 * (cpu_data.cpu_interrupt_level).
 */
static inline int
get_interrupt_level(void)
{
	CPU_DATA_GET(cpu_interrupt_level,int)
}
/*
 * Return the logical cpu number of the executing processor
 * (cpu_data.cpu_number).
 */
static inline int
get_cpu_number(void)
{
	CPU_DATA_GET(cpu_number,int)
}
/*
 * Return the physical cpu number of the executing processor
 * (cpu_data.cpu_phys_number).
 */
static inline int
get_cpu_phys_number(void)
{
	CPU_DATA_GET(cpu_phys_number,int)
}
304 | ␊ |
305 | static inline void␊ |
306 | disable_preemption(void)␊ |
307 | {␊ |
308 | ␉__asm__ volatile ("incl %%gs:%P0"␊ |
309 | ␉␉␉:␊ |
310 | ␉␉␉: "i" (offsetof(cpu_data_t, cpu_preemption_level)));␊ |
311 | }␊ |
312 | #if UNUSED␊ |
/*
 * Drop one level of preemption disablement and, when the count reaches
 * zero, call _kernel_preempt_check so any pending preemption can take
 * effect.  (Currently compiled out -- guarded by #if UNUSED.)
 *
 * The surrounding if() skips the decrement when the recorded level is
 * already zero, so unbalanced calls cannot underflow the counter.
 */
static inline void
enable_preemption(void)
{
	//assert(get_preemption_level() > 0);

	if (get_preemption_level() > 0){
		/*
		 * decl sets ZF when the counter hits zero; jne skips the
		 * check call while the count is still non-zero.  eax/ecx/
		 * edx are clobbered because the call follows the C ABI.
		 */
		__asm__ volatile ("decl %%gs:%P0		\n\t"
						 "jne 1f			\n\t"
						 "call _kernel_preempt_check	\n\t"
						 "1:"
						 : /* no outputs */
						 : "i" (offsetof(cpu_data_t, cpu_preemption_level))
						 : "eax", "ecx", "edx", "cc", "memory");
		}
}
328 | #endif␊ |
329 | static inline void␊ |
330 | enable_preemption_no_check(void)␊ |
331 | {␊ |
332 | ␉//assert(get_preemption_level() > 0);␊ |
333 | ␉␊ |
334 | ␉if (get_preemption_level() > 0){␉␉␊ |
335 | ␉␉__asm__ volatile ("decl %%gs:%P0"␊ |
336 | ␉␉␉␉␉␉ : /* no outputs */␊ |
337 | ␉␉␉␉␉␉ : "i" (offsetof(cpu_data_t, cpu_preemption_level))␊ |
338 | ␉␉␉␉␉␉ : "cc", "memory");␊ |
339 | ␉}␉␊ |
340 | }␊ |
341 | ␊ |
/*
 * MP variant of preemption disable.  On x86 the fast inline
 * disable_preemption() already operates on per-cpu state, so this
 * simply delegates to it.
 */
static inline void
mp_disable_preemption(void)
{
	disable_preemption();
}
347 | #if UNUSED␊ |
/*
 * Counterpart of mp_disable_preemption(); delegates to
 * enable_preemption().  Compiled out under the same #if UNUSED guard
 * as enable_preemption() itself.
 */
static inline void
mp_enable_preemption(void)
{
	enable_preemption();
}
353 | #endif␊ |
/*
 * MP variant of enable_preemption_no_check(); simply delegates.
 */
static inline void
mp_enable_preemption_no_check(void)
{
	enable_preemption_no_check();
}
359 | ␊ |
360 | static inline cpu_data_t *␊ |
361 | current_cpu_datap(void)␊ |
362 | {␊ |
363 | ␉CPU_DATA_GET(cpu_this, cpu_data_t *);␊ |
364 | }␊ |
365 | ␊ |
366 | ␊ |
367 | #endif␉/* I386_CPU_DATA */␊ |
368 | |