1 | /*␊ |
2 | * Copyright 2008 Islam Ahmed Zaid. All rights reserved. <azismed@gmail.com>␊ |
3 | * AsereBLN: 2009: cleanup and bugfix␊ |
4 | */␊ |
5 | ␊ |
6 | #include "libsaio.h"␊ |
7 | #include "platform.h"␊ |
8 | #include "cpu.h"␊ |
9 | #include "bootstruct.h"␊ |
10 | #include "boot.h"␊ |
11 | ␊ |
12 | #ifndef DEBUG_CPU␊ |
13 | #define DEBUG_CPU 0␊ |
14 | #endif␊ |
15 | ␊ |
16 | #if DEBUG_CPU␊ |
17 | #define DBG(x...)␉␉printf(x)␊ |
18 | #else␊ |
19 | #define DBG(x...)␉␉msglog(x)␊ |
20 | #endif␊ |
21 | ␊ |
22 | /*␊ |
23 | * timeRDTSC()␊ |
24 | * This routine sets up PIT counter 2 to count down 1/20 of a second.␊ |
25 | * It pauses until the value is latched in the counter␊ |
26 | * and then reads the time stamp counter to return to the caller.␊ |
27 | */␊ |
28 | uint64_t timeRDTSC(void)␊ |
29 | {␊ |
30 | ␉int␉␉attempts = 0;␊ |
31 | ␉uint64_t latchTime;␊ |
32 | ␉uint64_t␉saveTime,intermediate;␊ |
33 | ␉unsigned int timerValue, lastValue;␊ |
34 | ␉//boolean_t␉int_enabled;␊ |
35 | ␉/*␊ |
36 | ␉ * Table of correction factors to account for␊ |
37 | ␉ *␉ - timer counter quantization errors, and␊ |
38 | ␉ *␉ - undercounts 0..5␊ |
39 | ␉ */␊ |
40 | #define SAMPLE_CLKS_EXACT␉(((double) CLKNUM) / 20.0)␊ |
41 | #define SAMPLE_CLKS_INT␉␉((int) CLKNUM / 20)␊ |
42 | #define SAMPLE_NSECS␉␉(2000000000LL)␊ |
43 | #define SAMPLE_MULTIPLIER␉(((double)SAMPLE_NSECS)*SAMPLE_CLKS_EXACT)␊ |
44 | #define ROUND64(x)␉␉((uint64_t)((x) + 0.5))␊ |
45 | ␉uint64_t␉scale[6] = {␊ |
46 | ␉␉ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-0)), ␊ |
47 | ␉␉ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-1)), ␊ |
48 | ␉␉ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-2)), ␊ |
49 | ␉␉ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-3)), ␊ |
50 | ␉␉ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-4)), ␊ |
51 | ␉␉ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-5))␊ |
52 | ␉};␊ |
53 | ␊ |
54 | ␉//int_enabled = ml_set_interrupts_enabled(FALSE);␊ |
55 | ␊ |
56 | restart:␊ |
57 | ␉if (attempts >= 9) // increase to up to 9 attempts.␊ |
58 | ␉{␊ |
59 | ␉ // This will flash-reboot. TODO: Use tscPanic instead.␊ |
60 | ␉␉printf("Timestamp counter calibation failed with %d attempts\n", attempts);␊ |
61 | ␉}␊ |
62 | ␉attempts++;␊ |
63 | ␉enable_PIT2();␉␉// turn on PIT2␊ |
64 | ␉set_PIT2(0);␉␉// reset timer 2 to be zero␊ |
65 | ␉latchTime = rdtsc64();␉// get the time stamp to time ␊ |
66 | ␉latchTime = get_PIT2(&timerValue) - latchTime; // time how long this takes␊ |
67 | ␉set_PIT2(SAMPLE_CLKS_INT);␉// set up the timer for (almost) 1/20th a second␊ |
68 | ␉saveTime = rdtsc64();␉// now time how long a 20th a second is...␊ |
69 | ␉get_PIT2(&lastValue);␊ |
70 | ␉get_PIT2(&lastValue);␉// read twice, first value may be unreliable␊ |
71 | ␉do {␊ |
72 | ␉␉intermediate = get_PIT2(&timerValue);␊ |
73 | ␉␉if (timerValue > lastValue)␊ |
74 | ␉␉{␊ |
75 | ␉␉␉// Timer wrapped␊ |
76 | ␉␉␉set_PIT2(0);␊ |
77 | ␉␉␉disable_PIT2();␊ |
78 | ␉␉␉goto restart;␊ |
79 | ␉␉}␊ |
80 | ␉␉lastValue = timerValue;␊ |
81 | ␉} while (timerValue > 5);␊ |
82 | ␉printf("timerValue␉ %d\n",timerValue);␊ |
83 | ␉printf("intermediate 0x%016llx\n",intermediate);␊ |
84 | ␉printf("saveTime␉ 0x%016llx\n",saveTime);␊ |
85 | ␊ |
86 | ␉intermediate -= saveTime;␉␉// raw count for about 1/20 second␊ |
87 | ␉intermediate *= scale[timerValue];␉// rescale measured time spent␊ |
88 | ␉intermediate /= SAMPLE_NSECS;␉// so its exactly 1/20 a second␊ |
89 | ␉intermediate += latchTime;␉␉// add on our save fudge␊ |
90 | ␊ |
91 | ␉set_PIT2(0);␉␉␉// reset timer 2 to be zero␊ |
92 | ␉disable_PIT2();␉␉␉// turn off PIT 2␊ |
93 | ␉␊ |
94 | ␉//ml_set_interrupts_enabled(int_enabled);␊ |
95 | ␉return intermediate;␊ |
96 | }␊ |
97 | ␊ |
98 | /*␊ |
99 | * DFE: Measures the TSC frequency in Hz (64-bit) using the ACPI PM timer␊ |
100 | */␊ |
static uint64_t measure_tsc_frequency(void)
{
	uint64_t tscStart;
	uint64_t tscEnd;
	uint64_t tscDelta = 0xffffffffffffffffULL;	// running minimum; stays UINT64_MAX if every run is rejected
	unsigned long pollCount;
	uint64_t retval = 0;
	int i;

	/* Time how many TSC ticks elapse in 30 msec using the 8254 PIT
	 * counter 2. We run this loop 10 times to make sure the cache
	 * is hot and we take the minimum delta from all of the runs.
	 * That is to say that we're biased towards measuring the minimum
	 * number of TSC ticks that occur while waiting for the timer to
	 * expire. That theoretically helps avoid inconsistencies when
	 * running under a VM if the TSC is not virtualized and the host
	 * steals time.	 The TSC is normally virtualized for VMware.
	 */
	for(i = 0; i < 10; ++i)
	{
		enable_PIT2();
		set_PIT2_mode0(CALIBRATE_LATCH);
		tscStart = rdtsc64();
		pollCount = poll_PIT2_gate();
		tscEnd = rdtsc64();
		/* The poll loop must have run at least a few times for accuracy */
		if (pollCount <= 1) {
			continue;
		}
		/* The TSC must increment at LEAST once every millisecond.
		 * We should have waited exactly 30 msec so the TSC delta should
		 * be >= 30. Anything less and the processor is way too slow.
		 */
		if ((tscEnd - tscStart) <= CALIBRATE_TIME_MSEC) {
			continue;
		}
		// tscDelta = MIN(tscDelta, (tscEnd - tscStart))
		if ( (tscEnd - tscStart) < tscDelta ) {
			tscDelta = tscEnd - tscStart;
		}
	}
	/* tscDelta is now the least number of TSC ticks the processor made in
	 * a timespan of 0.03 s (e.g. 30 milliseconds)
	 * Linux thus divides by 30 which gives the answer in kiloHertz because
	 * 1 / ms = kHz. But we're xnu and most of the rest of the code uses
	 * Hz so we need to convert our milliseconds to seconds. Since we're
	 * dividing by the milliseconds, we simply multiply by 1000.
	 */

	/* Unlike linux, we're not limited to 32-bit, but we do need to take care
	 * that we're going to multiply by 1000 first so we do need at least some
	 * arithmetic headroom. For now, 32-bit should be enough.
	 * Also unlike Linux, our compiler can do 64-bit integer arithmetic.
	 */
	/* If no run succeeded above, tscDelta is still UINT64_MAX and this
	 * branch makes us return 0 (caller treats that as measurement failure). */
	if (tscDelta > (1ULL<<32)) {
		retval = 0;
	} else {
		retval = tscDelta * 1000 / 30;
	}
	disable_PIT2();
	return retval;
}
163 | ␊ |
164 | /*␊ |
165 | * Original comment/code:␊ |
166 | * "DFE: Measures the Max Performance Frequency in Hz (64-bit)"␊ |
167 | *␊ |
168 | * Measures the Actual Performance Frequency in Hz (64-bit)␊ |
169 | * (just a naming change, mperf --> aperf )␊ |
170 | */␊ |
static uint64_t measure_aperf_frequency(void)
{
	uint64_t aperfStart;
	uint64_t aperfEnd;
	uint64_t aperfDelta = 0xffffffffffffffffULL;	// running minimum; stays UINT64_MAX if every run is rejected
	unsigned long pollCount;
	uint64_t retval = 0;
	int i;

	/* Time how many APERF ticks elapse in 30 msec using the 8254 PIT
	 * counter 2. We run this loop 10 times to make sure the cache
	 * is hot and we take the minimum delta from all of the runs.
	 * That is to say that we're biased towards measuring the minimum
	 * number of APERF ticks that occur while waiting for the timer to
	 * expire.
	 */
	for(i = 0; i < 10; ++i)
	{
		enable_PIT2();
		set_PIT2_mode0(CALIBRATE_LATCH);
		aperfStart = rdmsr64(MSR_AMD_APERF);
		pollCount = poll_PIT2_gate();
		aperfEnd = rdmsr64(MSR_AMD_APERF);
		/* The poll loop must have run at least a few times for accuracy */
		if (pollCount <= 1) {
			continue;
		}
		/* The APERF count must increment at LEAST once every millisecond.
		 * We should have waited exactly 30 msec so the APERF delta should
		 * be >= 30. Anything less and the processor is way too slow.
		 */
		if ((aperfEnd - aperfStart) <= CALIBRATE_TIME_MSEC) {
			continue;
		}
		// aperfDelta = MIN(aperfDelta, (aperfEnd - aperfStart))
		if ( (aperfEnd - aperfStart) < aperfDelta ) {
			aperfDelta = aperfEnd - aperfStart;
		}
	}
	/* aperfDelta is now the least number of APERF ticks the processor made in
	 * a timespan of 0.03 s (e.g. 30 milliseconds)
	 */

	/* If no run succeeded, aperfDelta is still UINT64_MAX and we return 0. */
	if (aperfDelta > (1ULL<<32)) {
		retval = 0;
	} else {
		retval = aperfDelta * 1000 / 30;
	}
	disable_PIT2();
	return retval;
}
222 | ␊ |
223 | /*␊ |
224 | * Calculates the FSB and CPU frequencies using specific MSRs for each CPU␊ |
225 | * - multi. is read from a specific MSR. In the case of Intel, there is:␊ |
226 | *␉ a max multi. (used to calculate the FSB freq.),␊ |
227 | *␉ and a current multi. (used to calculate the CPU freq.)␊ |
228 | * - fsbFrequency = tscFrequency / multi␊ |
229 | * - cpuFrequency = fsbFrequency * multi␊ |
230 | */␊ |
void scan_cpu(PlatformInfo_t *p)
{
	/* Fills p->CPU with CPUID leaves, vendor/family/model/stepping, core and
	 * thread counts, brand string, feature flags, and the measured
	 * TSC/FSB/CPU frequencies. */
	uint64_t	tscFrequency = 0;
	uint64_t	fsbFrequency = 0;
	uint64_t	cpuFrequency = 0;
	uint64_t	msr = 0;
	uint64_t	flex_ratio = 0;
	uint32_t	max_ratio = 0;
	uint32_t	min_ratio = 0;	// NOTE(review): never assigned below, so MinRatio is always stored as 0
	uint8_t		bus_ratio_max = 0;
	uint8_t		currdiv = 0;
	uint8_t		currcoef = 0;
	uint8_t		maxdiv = 0;
	uint8_t		maxcoef = 0;
	const char	*newratio;
	int		len = 0;
	int		myfsb = 0;
	uint8_t		bus_ratio_min = 0;
	uint32_t	reg[4];
	char		str[128];

	/* get cpuid values */
	do_cpuid(0x00000000, p->CPU.CPUID[CPUID_0]);
	do_cpuid(0x00000001, p->CPU.CPUID[CPUID_1]);

	do_cpuid(0x00000002, p->CPU.CPUID[CPUID_2]);
	do_cpuid(0x00000003, p->CPU.CPUID[CPUID_3]);
	do_cpuid2(0x00000004, 0, p->CPU.CPUID[CPUID_4]);

	do_cpuid(0x80000000, p->CPU.CPUID[CPUID_80]);
	/* Leaves 5/6 only if the max basic leaf (CPUID 0, EAX) says they exist. */
	if (p->CPU.CPUID[CPUID_0][0] >= 0x5) {
		do_cpuid(5, p->CPU.CPUID[CPUID_5]);
	}
	if (p->CPU.CPUID[CPUID_0][0] >= 6) {
		do_cpuid(6, p->CPU.CPUID[CPUID_6]);
	}
	/* Extended leaves gated by the low nibble of the max extended leaf. */
	if ((p->CPU.CPUID[CPUID_80][0] & 0x0000000f) >= 8) {
		do_cpuid(0x80000008, p->CPU.CPUID[CPUID_88]);
		do_cpuid(0x80000001, p->CPU.CPUID[CPUID_81]);
	} else if ((p->CPU.CPUID[CPUID_80][0] & 0x0000000f) >= 1) {
		do_cpuid(0x80000001, p->CPU.CPUID[CPUID_81]);
	}

// #if DEBUG_CPU
	{
		int		i;
		DBG("CPUID Raw Values:\n");
		for (i = 0; i < CPUID_MAX; i++) {
			DBG("%02d: %08x-%08x-%08x-%08x\n", i,
				 p->CPU.CPUID[i][0], p->CPU.CPUID[i][1],
				 p->CPU.CPUID[i][2], p->CPU.CPUID[i][3]);
		}
	}
// #endif

/*
EAX (Intel):
31 28 27 20 19 16 1514 1312 11 8 7 4 3 0
+--------+----------------+--------+----+----+--------+--------+--------+
|########|Extended family |Extmodel|####|type|familyid| model |stepping|
+--------+----------------+--------+----+----+--------+--------+--------+

EAX (AMD):
31 28 27 20 19 16 1514 1312 11 8 7 4 3 0
+--------+----------------+--------+----+----+--------+--------+--------+
|########|Extended family |Extmodel|####|####|familyid| model |stepping|
+--------+----------------+--------+----+----+--------+--------+--------+
*/

	p->CPU.Vendor		= p->CPU.CPUID[CPUID_0][1];
	p->CPU.Signature	= p->CPU.CPUID[CPUID_1][0];
	p->CPU.Stepping		= (uint8_t)bitfield(p->CPU.CPUID[CPUID_1][0], 3, 0); // stepping = cpu_feat_eax & 0xF;
	p->CPU.Model		= (uint8_t)bitfield(p->CPU.CPUID[CPUID_1][0], 7, 4); // model = (cpu_feat_eax >> 4) & 0xF;
	p->CPU.Family		= (uint8_t)bitfield(p->CPU.CPUID[CPUID_1][0], 11, 8); // family = (cpu_feat_eax >> 8) & 0xF;
	//p->CPU.Type		= (uint8_t)bitfield(p->CPU.CPUID[CPUID_1][0], 13, 12);	// type = (cpu_feat_eax >> 12) & 0x3;
	p->CPU.ExtModel		= (uint8_t)bitfield(p->CPU.CPUID[CPUID_1][0], 19, 16); // ext_model = (cpu_feat_eax >> 16) & 0xF;
	p->CPU.ExtFamily	= (uint8_t)bitfield(p->CPU.CPUID[CPUID_1][0], 27, 20);	// ext_family = (cpu_feat_eax >> 20) & 0xFF;

	/* Combined model number: extended model is the high nibble. */
	p->CPU.Model += (p->CPU.ExtModel << 4);

	if (p->CPU.Vendor == CPUID_VENDOR_INTEL)
	{
		/*
		 * Find the number of enabled cores and threads
		 * (which determines whether SMT/Hyperthreading is active).
		 */
		switch (p->CPU.Model)
		{
			case CPU_MODEL_NEHALEM:
			case CPU_MODEL_FIELDS:
			case CPU_MODEL_DALES:
			case CPU_MODEL_NEHALEM_EX:
			case CPU_MODEL_JAKETOWN:
			case CPU_MODEL_SANDYBRIDGE:
			case CPU_MODEL_IVYBRIDGE:
			case CPU_MODEL_HASWELL:
			case CPU_MODEL_HASWELL_SVR:
			//case CPU_MODEL_HASWELL_H:
			case CPU_MODEL_HASWELL_ULT:
			case CPU_MODEL_CRYSTALWELL:
				msr = rdmsr64(MSR_CORE_THREAD_COUNT);
				p->CPU.NoCores		= (uint8_t)bitfield((uint32_t)msr, 31, 16);
				p->CPU.NoThreads	= (uint8_t)bitfield((uint32_t)msr, 15, 0);
				break;

			case CPU_MODEL_DALES_32NM:
			case CPU_MODEL_WESTMERE:
			case CPU_MODEL_WESTMERE_EX:
				// Westmere uses a narrower core-count field (19:16)
				msr = rdmsr64(MSR_CORE_THREAD_COUNT);
				p->CPU.NoCores		= (uint8_t)bitfield((uint32_t)msr, 19, 16);
				p->CPU.NoThreads	= (uint8_t)bitfield((uint32_t)msr, 15, 0);
				break;

			default:
				p->CPU.NoCores = 0;
				break;
		} // end switch
	}

	/* Fallback when no MSR-based count was obtained above; CoresPerPackage /
	 * LogicalPerPackage are presumably filled by earlier platform scanning —
	 * TODO confirm against caller. */
	if (p->CPU.NoCores == 0) {
		p->CPU.NoCores		= (uint8_t)(p->CPU.CoresPerPackage & 0xff);
		p->CPU.NoThreads	= (uint8_t)(p->CPU.LogicalPerPackage & 0xff);
	}

	/* get BrandString (if supported) */
	/* Copyright: from Apple's XNU cpuid.c */
	if (p->CPU.CPUID[CPUID_80][0] > 0x80000004) {
		char	 *s;
		bzero(str, 128);
		/*
		 * The BrandString 48 bytes (max), guaranteed to
		 * be NULL terminated.
		 */
		do_cpuid(0x80000002, reg);
		memcpy(&str[0], (char *)reg, 16);
		do_cpuid(0x80000003, reg);
		memcpy(&str[16], (char *)reg, 16);
		do_cpuid(0x80000004, reg);
		memcpy(&str[32], (char *)reg, 16);
		/* Skip leading spaces some CPUs pad the brand string with. */
		for (s = str; *s != '\0'; s++) {
			if (*s != ' ') { break; }
		}
		strlcpy(p->CPU.BrandString, s, 48);

		if (!strncmp(p->CPU.BrandString, CPU_STRING_UNKNOWN,
MIN(sizeof(p->CPU.BrandString),
strlen(CPU_STRING_UNKNOWN) + 1))) {
			/*
			 * This string means we have a firmware-programmable brand string,
			 * and the firmware couldn't figure out what sort of CPU we have.
			 */
			p->CPU.BrandString[0] = '\0';
		}
		p->CPU.BrandString[47] = '\0';
//		DBG("Brandstring = %s\n", p->CPU.BrandString);
	}

	//workaround for N270. I don't know why it detected wrong
	// MSR is *NOT* available on the Intel Atom CPU
	if ((p->CPU.Model == CPU_MODEL_ATOM) && (strstr(p->CPU.BrandString, "270"))) {
		p->CPU.NoCores		= 1;
		p->CPU.NoThreads	= 2;
	}

	/* AMD: logical count from CPUID 1 EBX[23:16], cores from 0x80000008 ECX[7:0]+1. */
	if (p->CPU.Vendor == CPUID_VENDOR_AMD) {
		p->CPU.NoThreads	= (uint8_t)bitfield(p->CPU.CPUID[CPUID_1][1], 23, 16);
		p->CPU.NoCores		= (uint8_t)bitfield(p->CPU.CPUID[CPUID_88][2], 7, 0) + 1;
	}

	/* setup features */
	if ((bit(23) & p->CPU.CPUID[CPUID_1][3]) != 0) {
		p->CPU.Features |= CPU_FEATURE_MMX;
	}
	if ((bit(25) & p->CPU.CPUID[CPUID_1][3]) != 0) {
		p->CPU.Features |= CPU_FEATURE_SSE;
	}
	if ((bit(26) & p->CPU.CPUID[CPUID_1][3]) != 0) {
		p->CPU.Features |= CPU_FEATURE_SSE2;
	}
	if ((bit(0) & p->CPU.CPUID[CPUID_1][2]) != 0) {
		p->CPU.Features |= CPU_FEATURE_SSE3;
	}
	if ((bit(19) & p->CPU.CPUID[CPUID_1][2]) != 0) {
		p->CPU.Features |= CPU_FEATURE_SSE41;
	}
	if ((bit(20) & p->CPU.CPUID[CPUID_1][2]) != 0) {
		p->CPU.Features |= CPU_FEATURE_SSE42;
	}
	if ((bit(29) & p->CPU.CPUID[CPUID_81][3]) != 0) {
		p->CPU.Features |= CPU_FEATURE_EM64T;
	}
	if ((bit(5) & p->CPU.CPUID[CPUID_1][3]) != 0) {
		p->CPU.Features |= CPU_FEATURE_MSR;
	}
	//if ((bit(28) & p->CPU.CPUID[CPUID_1][3]) != 0) {
	/* HTT inferred from thread count exceeding core count (not the CPUID bit). */
	if (p->CPU.NoThreads > p->CPU.NoCores) {
		p->CPU.Features |= CPU_FEATURE_HTT;
	}

	tscFrequency = measure_tsc_frequency();
	DBG("cpu freq classic = 0x%016llx\n", tscFrequency);
	/* if usual method failed */
	if ( tscFrequency < 1000 ) { //TEST
		tscFrequency = timeRDTSC() * 20;//measure_tsc_frequency();
		// DBG("cpu freq timeRDTSC = 0x%016llx\n", tscFrequency);
	} else {
		// DBG("cpu freq timeRDTSC = 0x%016llxn", timeRDTSC() * 20);
	}
	fsbFrequency = 0;
	cpuFrequency = 0;

	/* Frequency derivation: Intel family 6 model >= 0x0c, or family 0x0f
	 * (NetBurst) model >= 3 — older parts fall through with fsb/cpu = 0. */
	if (p->CPU.Vendor == CPUID_VENDOR_INTEL && ((p->CPU.Family == 0x06 && p->CPU.Model >= 0x0c) || (p->CPU.Family == 0x0f && p->CPU.Model >= 0x03))) {
		int intelCPU = p->CPU.Model;
		if (p->CPU.Family == 0x06) {
			/* Nehalem CPU model */
			switch (p->CPU.Model) {
				case CPU_MODEL_NEHALEM:
				case CPU_MODEL_FIELDS:
				case CPU_MODEL_DALES:
				case CPU_MODEL_DALES_32NM:
				case CPU_MODEL_WESTMERE:
				case CPU_MODEL_NEHALEM_EX:
				case CPU_MODEL_WESTMERE_EX:
/* --------------------------------------------------------- */
				case CPU_MODEL_SANDYBRIDGE:
				case CPU_MODEL_JAKETOWN:
				case CPU_MODEL_IVYBRIDGE_XEON:
				case CPU_MODEL_IVYBRIDGE:
				case CPU_MODEL_HASWELL:
				case CPU_MODEL_HASWELL_SVR:

				case CPU_MODEL_HASWELL_ULT:
				case CPU_MODEL_CRYSTALWELL:
/* --------------------------------------------------------- */
					msr = rdmsr64(MSR_PLATFORM_INFO);
					DBG("msr(%d): platform_info %08x\n", __LINE__, bitfield(msr, 31, 0));
					bus_ratio_max = bitfield(msr, 15, 8);
					bus_ratio_min = bitfield(msr, 47, 40); //valv: not sure about this one (Remarq.1)
					msr = rdmsr64(MSR_FLEX_RATIO);
					DBG("msr(%d): flex_ratio %08x\n", __LINE__, bitfield(msr, 31, 0));
					/* Bit 16 = flex-ratio enable; only then is the ratio field valid. */
					if (bitfield(msr, 16, 16)) {
						flex_ratio = bitfield(msr, 15, 8);
						/* bcc9: at least on the gigabyte h67ma-ud2h,
						 where the cpu multipler can't be changed to
						 allow overclocking, the flex_ratio msr has unexpected (to OSX)
						 contents.	These contents cause mach_kernel to
						 fail to compute the bus ratio correctly, instead
						 causing the system to crash since tscGranularity
						 is inadvertently set to 0.
						 */
						if (flex_ratio == 0) {
							/* Clear bit 16 (evidently the presence bit) */
							wrmsr64(MSR_FLEX_RATIO, (msr & 0xFFFFFFFFFFFEFFFFULL));
							msr = rdmsr64(MSR_FLEX_RATIO);
							DBG("Unusable flex ratio detected. Patched MSR now %08x\n", bitfield(msr, 31, 0));
						} else {
							if (bus_ratio_max > flex_ratio) {
								bus_ratio_max = flex_ratio;
							}
						}
					}

					if (bus_ratio_max) {
						fsbFrequency = (tscFrequency / bus_ratio_max);
					}
					//valv: Turbo Ratio Limit
					if ((intelCPU != 0x2e) && (intelCPU != 0x2f)) {
						msr = rdmsr64(MSR_TURBO_RATIO_LIMIT);

						cpuFrequency = bus_ratio_max * fsbFrequency;
						max_ratio = bus_ratio_max * 10;
					} else {
						cpuFrequency = tscFrequency;
					}
					/* Optional user override of the bus ratio via the busratio
					 * boot-config key; value may carry one decimal (len >= 3). */
					if ((getValueForKey(kbusratio, &newratio, &len, &bootInfo->chameleonConfig)) && (len <= 4)) {
						max_ratio = atoi(newratio);
						max_ratio = (max_ratio * 10);
						if (len >= 3) {
							max_ratio = (max_ratio + 5);
						}

						verbose("Bus-Ratio: min=%d, max=%s\n", bus_ratio_min, newratio);

						// extreme overclockers may love 320 ;)
						if ((max_ratio >= min_ratio) && (max_ratio <= 320)) {
							cpuFrequency = (fsbFrequency * max_ratio) / 10;
							if (len >= 3) {
								maxdiv = 1;
							} else {
								maxdiv = 0;
							}
						} else {
							max_ratio = (bus_ratio_max * 10);
						}
					}
					//valv: to be uncommented if Remarq.1 didn't stick
					/*if (bus_ratio_max > 0) bus_ratio = flex_ratio;*/
					p->CPU.MaxRatio = max_ratio;
					p->CPU.MinRatio = min_ratio;

				myfsb = fsbFrequency / 1000000;
				verbose("Sticking with [BCLK: %dMhz, Bus-Ratio: %d]\n", myfsb, max_ratio/10); // Bungo: fixed wrong Bus-Ratio readout
				currcoef = bus_ratio_max;

				break;

			default:
				/* Pre-Nehalem family 6: derive ratios from IA32_PERF_STATUS. */
				msr = rdmsr64(MSR_IA32_PERF_STATUS);
				DBG("msr(%d): ia32_perf_stat 0x%08x\n", __LINE__, bitfield(msr, 31, 0));
				currcoef = bitfield(msr, 12, 8); // Bungo: reverted to 2263 state because of wrong old CPUs freq. calculating
				/* Non-integer bus ratio for the max-multi*/
				maxdiv = bitfield(msr, 46, 46);
				/* Non-integer bus ratio for the current-multi (undocumented)*/
				currdiv = bitfield(msr, 14, 14);

				// This will always be model >= 3
				if ((p->CPU.Family == 0x06 && p->CPU.Model >= 0x0e) || (p->CPU.Family == 0x0f)) {
					/* On these models, maxcoef defines TSC freq */
					maxcoef = bitfield(msr, 44, 40);
				} else {
					/* On lower models, currcoef defines TSC freq */
					/* XXX */
					maxcoef = currcoef;
				}

				if (maxcoef) {
					/* A set div bit means a half-step multiplier (coef + 0.5). */
					if (maxdiv) {
						fsbFrequency = ((tscFrequency * 2) / ((maxcoef * 2) + 1));
					} else {
						fsbFrequency = (tscFrequency / maxcoef);
					}
					if (currdiv) {
						cpuFrequency = (fsbFrequency * ((currcoef * 2) + 1) / 2);
					} else {
						cpuFrequency = (fsbFrequency * currcoef);
					}
					DBG("max: %d%s current: %d%s\n", maxcoef, maxdiv ? ".5" : "",currcoef, currdiv ? ".5" : "");
				}
				break;
			}
		}
		/* Mobile CPU */
		if (rdmsr64(MSR_IA32_PLATFORM_ID) & (1<<28)) {
			p->CPU.Features |= CPU_FEATURE_MOBILE;
		}
	} else if ((p->CPU.Vendor == CPUID_VENDOR_AMD) && (p->CPU.Family == 0x0f)) {
		switch(p->CPU.ExtFamily) {
			case 0x00: /* K8 */
				msr = rdmsr64(K8_FIDVID_STATUS);
				maxcoef = bitfield(msr, 21, 16) / 2 + 4;
				currcoef = bitfield(msr, 5, 0) / 2 + 4;
				break;

			case 0x01: /* K10 */
				msr = rdmsr64(K10_COFVID_STATUS);
				do_cpuid2(0x00000006, 0, p->CPU.CPUID[CPUID_6]);
				// EffFreq: effective frequency interface
				if (bitfield(p->CPU.CPUID[CPUID_6][2], 0, 0) == 1) {
					//uint64_t mperf = measure_mperf_frequency();
					uint64_t aperf = measure_aperf_frequency();
					cpuFrequency = aperf;
				}
				// NOTE: tsc runs at the maxcoef (non turbo)
				//			*not* at the turbo frequency.
				maxcoef	 = bitfield(msr, 54, 49) / 2 + 4;
				currcoef = bitfield(msr, 5, 0) + 0x10;
				currdiv = 2 << bitfield(msr, 8, 6);

				break;

			case 0x05: /* K14 */
				msr = rdmsr64(K10_COFVID_STATUS);
				currcoef = (bitfield(msr, 54, 49) + 0x10) << 2;
				currdiv = (bitfield(msr, 8, 4) + 1) << 2;
				currdiv += bitfield(msr, 3, 0);

				break;

			case 0x02: /* K11 */
				// not implemented
				break;
		}

		if (maxcoef) {
			if (currdiv) {
				if (!currcoef) {
					currcoef = maxcoef;
				}

				if (!cpuFrequency) {
					fsbFrequency = ((tscFrequency * currdiv) / currcoef);
				} else {
					fsbFrequency = ((cpuFrequency * currdiv) / currcoef);
				}
				DBG("%d.%d\n", currcoef / currdiv, ((currcoef % currdiv) * 100) / currdiv);
			} else {
				if (!cpuFrequency) {
					fsbFrequency = (tscFrequency / maxcoef);
				} else {
					fsbFrequency = (cpuFrequency / maxcoef);
				}
				DBG("%d\n", currcoef);
			}
		} else if (currcoef) {
			if (currdiv) {
				fsbFrequency = ((tscFrequency * currdiv) / currcoef);
				DBG("%d.%d\n", currcoef / currdiv, ((currcoef % currdiv) * 100) / currdiv);
			} else {
				fsbFrequency = (tscFrequency / currcoef);
				DBG("%d\n", currcoef);
			}
		}
		if (!cpuFrequency) cpuFrequency = tscFrequency;
	}
	
#if 0
	if (!fsbFrequency)
	{
		fsbFrequency = (DEFAULT_FSB * 1000);
		cpuFrequency = tscFrequency;
		DBG("0 ! using the default value for FSB !\n");
	}

	DBG("cpu freq = 0x%016llxn", timeRDTSC() * 20);

#endif

	/* Publish results. */
	p->CPU.MaxCoef = maxcoef;
	p->CPU.MaxDiv = maxdiv;
	p->CPU.CurrCoef = currcoef;
	p->CPU.CurrDiv = currdiv;
	p->CPU.TSCFrequency = tscFrequency;
	p->CPU.FSBFrequency = fsbFrequency;
	p->CPU.CPUFrequency = cpuFrequency;

	// keep formatted with spaces instead of tabs
	DBG("\n---------------------------------------------\n");
	DBG("--------------- CPU INFO ---------------\n");
	DBG("---------------------------------------------\n");
	DBG("Brand String: %s\n", p->CPU.BrandString); // Processor name (BIOS)
	DBG("Vendor: 0x%x\n", p->CPU.Vendor); // Vendor ex: GenuineIntel
	DBG("Family: 0x%x\n", p->CPU.Family); // Family ex: 6 (06h)
	DBG("ExtFamily: 0x%x\n", p->CPU.ExtFamily);
	DBG("Signature: %x\n", p->CPU.Signature); // CPUID signature
	DBG("Model: 0x%x\n", p->CPU.Model); // Model ex: 37 (025h)
	DBG("ExtModel: 0x%x\n", p->CPU.ExtModel);
	DBG("Stepping: 0x%x\n", p->CPU.Stepping); // Stepping ex: 5 (05h)
	DBG("MaxCoef: 0x%x\n", p->CPU.MaxCoef);
	DBG("CurrCoef: 0x%x\n", p->CPU.CurrCoef);
	DBG("MaxDiv: 0x%x\n", p->CPU.MaxDiv);
	DBG("CurrDiv: 0x%x\n", p->CPU.CurrDiv);
	DBG("TSCFreq: %dMHz\n", p->CPU.TSCFrequency / 1000000);
	DBG("FSBFreq: %dMHz\n", p->CPU.FSBFrequency / 1000000);
	DBG("CPUFreq: %dMHz\n", p->CPU.CPUFrequency / 1000000);
	DBG("Cores: %d\n", p->CPU.NoCores); // Cores
	DBG("Logical processor: %d\n", p->CPU.NoThreads); // Logical procesor
	DBG("Features: 0x%08x\n", p->CPU.Features);

	DBG("\n---------------------------------------------\n");
#if DEBUG_CPU
	pause();
#endif
}
694 | |