1 | /*␊ |
2 | * Copyright 2008 Islam Ahmed Zaid. All rights reserved. <azismed@gmail.com>␊ |
3 | * AsereBLN: 2009: cleanup and bugfix␊ |
4 | * Bronya: 2015 Improve AMD support, cleanup and bugfix␊ |
5 | */␊ |
6 | ␊ |
7 | #include "libsaio.h"␊ |
8 | #include "platform.h"␊ |
9 | #include "cpu.h"␊ |
10 | #include "bootstruct.h"␊ |
11 | #include "boot.h"␊ |
12 | ␊ |
13 | #ifndef DEBUG_CPU␊ |
14 | ␉#define DEBUG_CPU 0␊ |
15 | #endif␊ |
16 | ␊ |
17 | #if DEBUG_CPU␊ |
18 | ␉#define DBG(x...)␉␉printf(x)␊ |
19 | #else␊ |
20 | ␉#define DBG(x...)␊ |
21 | #endif␊ |
22 | ␊ |
23 | ␊ |
// Round the reported CPU frequency to the nearest 10 MHz for display.
#define UI_CPUFREQ_ROUNDING_FACTOR	10000000

// Clock/frequency info exported to the rest of the boot loader;
// filled in by rtc_set_cyc_per_sec() below.
clock_frequency_info_t gPEClockFrequencyInfo;
27 | ␊ |
28 | static __unused uint64_t rdtsc32(void)␊ |
29 | {␊ |
30 | ␉unsigned int lo,hi;␊ |
31 | ␉__asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi));␊ |
32 | ␉return ((uint64_t)hi << 32) | lo;␊ |
33 | }␊ |
34 | ␊ |
35 | /*␊ |
36 | * timeRDTSC()␊ |
37 | * This routine sets up PIT counter 2 to count down 1/20 of a second.␊ |
38 | * It pauses until the value is latched in the counter␊ |
39 | * and then reads the time stamp counter to return to the caller.␊ |
40 | */␊ |
static uint64_t timeRDTSC(void)
{
	int		attempts = 0;
	// TSC cost of latching+reading the PIT itself; added back at the end
	// as a fudge factor.
	uint32_t 	latchTime;
	uint64_t	saveTime,intermediate;
	unsigned int	timerValue, lastValue;
	//boolean_t	int_enabled;
	/*
	 * Table of correction factors to account for
	 *	 - timer counter quantization errors, and
	 *	 - undercounts 0..5
	 */
#define SAMPLE_CLKS_EXACT	(((double) CLKNUM) / 20.0)
#define SAMPLE_CLKS_INT		((int) CLKNUM / 20)
#define SAMPLE_NSECS		(2000000000LL)
#define SAMPLE_MULTIPLIER	(((double)SAMPLE_NSECS)*SAMPLE_CLKS_EXACT)
#define ROUND64(x)		((uint64_t)((x) + 0.5))
	// scale[u] rescales the raw TSC delta when the PIT stopped u counts
	// short of zero, so the result still corresponds to 1/20 s exactly.
	// (CLKNUM is presumably the PIT input clock in Hz — defined elsewhere.)
	uint64_t	scale[6] = {
		ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-0)), 
		ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-1)), 
		ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-2)), 
		ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-3)), 
		ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-4)), 
		ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-5))
	};

	//int_enabled = ml_set_interrupts_enabled(false);

restart:
	// NOTE(review): this guard currently does nothing — its body is
	// commented out, so a persistently-wrapping PIT retries forever
	// rather than failing after 3 attempts.  Left as-is per the TODO.
	if (attempts >= 3) // increase to up to 9 attempts.
	{
		// This will flash-reboot. TODO: Use tscPanic instead.
		//printf("Timestamp counter calibation failed with %d attempts\n", attempts);
	}
	attempts++;
	enable_PIT2();		// turn on PIT2
	set_PIT2(0);		// reset timer 2 to be zero
	latchTime = rdtsc32();	// get the time stamp to time
	latchTime = get_PIT2(&timerValue) - latchTime; // time how long this takes
	set_PIT2(SAMPLE_CLKS_INT);	// set up the timer for (almost) 1/20th a second
	saveTime = rdtsc32();	// now time how long a 20th a second is...
	get_PIT2(&lastValue);
	get_PIT2(&lastValue);	// read twice, first value may be unreliable
	// Spin until the PIT counts down to (nearly) zero; each iteration
	// rereads the counter and snapshots the TSC via get_PIT2.
	do {
		intermediate = get_PIT2(&timerValue);
		if (timerValue > lastValue)
		{
			// Timer wrapped
			set_PIT2(0);
			disable_PIT2();
			goto restart;
		}
		lastValue = timerValue;
	} while (timerValue > 5);
	//printf("timerValue	 %d\n",timerValue);
	//printf("intermediate 0x%016llX\n",intermediate);
	//printf("saveTime	 0x%016llX\n",saveTime);

	intermediate -= saveTime;		// raw count for about 1/20 second
	intermediate *= scale[timerValue];	// rescale measured time spent
	intermediate /= SAMPLE_NSECS;	// so its exactly 1/20 a second
	intermediate += latchTime;		// add on our save fudge

	set_PIT2(0);			// reset timer 2 to be zero
	disable_PIT2();			// turn off PIT 2

	//ml_set_interrupts_enabled(int_enabled);
	// Returns the number of TSC ticks in 1/20th of a second.
	return intermediate;
}
110 | ␊ |
111 | /*␊ |
112 | * DFE: Measures the TSC frequency in Hz (64-bit) using the ACPI PM timer␊ |
113 | */␊ |
static uint64_t __unused measure_tsc_frequency(void)
{
	uint64_t tscStart;
	uint64_t tscEnd;
	uint64_t tscDelta = 0xffffffffffffffffULL;	// minimum delta seen so far
	unsigned long pollCount;
	uint64_t retval = 0;	// measured TSC frequency in Hz; 0 on failure
	int i;

	/* Time how many TSC ticks elapse in 30 msec using the 8254 PIT
	 * counter 2. We run this loop 10 times to make sure the cache
	 * is hot and we take the minimum delta from all of the runs.
	 * That is to say that we're biased towards measuring the minimum
	 * number of TSC ticks that occur while waiting for the timer to
	 * expire. That theoretically helps avoid inconsistencies when
	 * running under a VM if the TSC is not virtualized and the host
	 * steals time.	 The TSC is normally virtualized for VMware.
	 * (CALIBRATE_LATCH / CALIBRATE_TIME_MSEC are defined elsewhere;
	 * the 30 msec figure assumes they encode a 30 ms countdown — the
	 * values are not visible here.)
	 */
	for(i = 0; i < 10; ++i)
	{
		enable_PIT2();
		set_PIT2_mode0(CALIBRATE_LATCH);
		tscStart = rdtsc64();
		pollCount = poll_PIT2_gate();
		tscEnd = rdtsc64();
		/* The poll loop must have run at least a few times for accuracy */
		if (pollCount <= 1)
		{
			continue;
		}
		/* The TSC must increment at LEAST once every millisecond.
		 * We should have waited exactly 30 msec so the TSC delta should
		 * be >= 30. Anything less and the processor is way too slow.
		 */
		if ((tscEnd - tscStart) <= CALIBRATE_TIME_MSEC)
		{
			continue;
		}
		// tscDelta = MIN(tscDelta, (tscEnd - tscStart))
		if ( (tscEnd - tscStart) < tscDelta )
		{
			tscDelta = tscEnd - tscStart;
		}
	}
	/* tscDelta is now the least number of TSC ticks the processor made in
	 * a timespan of 0.03 s (e.g. 30 milliseconds)
	 * Linux thus divides by 30 which gives the answer in kiloHertz because
	 * 1 / ms = kHz. But we're xnu and most of the rest of the code uses
	 * Hz so we need to convert our milliseconds to seconds. Since we're
	 * dividing by the milliseconds, we simply multiply by 1000.
	 */

	/* Unlike linux, we're not limited to 32-bit, but we do need to take care
	 * that we're going to multiply by 1000 first so we do need at least some
	 * arithmetic headroom. For now, 32-bit should be enough.
	 * Also unlike Linux, our compiler can do 64-bit integer arithmetic.
	 */
	if (tscDelta > (1ULL<<32))
	{
		// No run produced a usable delta (or result would overflow the
		// headroom above) — report failure with 0.
		retval = 0;
	}
	else
	{
		retval = tscDelta * 1000 / 30;
	}
	disable_PIT2();
	return retval;
}
182 | ␊ |
183 | static uint64_t␉rtc_set_cyc_per_sec(uint64_t cycles);␊ |
184 | #define RTC_FAST_DENOM␉0xFFFFFFFF␊ |
185 | ␊ |
186 | inline static uint32_t␊ |
187 | create_mul_quant_GHZ(int shift, uint32_t quant)␊ |
188 | {␊ |
189 | ␉return (uint32_t)((((uint64_t)NSEC_PER_SEC/20) << shift) / quant);␊ |
190 | }␊ |
191 | ␊ |
192 | struct␉{␊ |
193 | ␉mach_timespec_t␉␉␉calend_offset;␊ |
194 | ␉boolean_t␉␉␉calend_is_set;␊ |
195 | ␊ |
196 | ␉int64_t␉␉␉␉calend_adjtotal;␊ |
197 | ␉int32_t␉␉␉␉calend_adjdelta;␊ |
198 | ␊ |
199 | ␉uint32_t␉␉␉boottime;␊ |
200 | ␊ |
201 | ␉mach_timebase_info_data_t␉timebase_const;␊ |
202 | ␊ |
203 | ␉decl_simple_lock_data(,lock)␉/* real-time clock device lock */␊ |
204 | } rtclock;␊ |
205 | ␊ |
206 | uint32_t␉␉rtc_quant_shift;␉/* clock to nanos right shift */␊ |
207 | uint32_t␉␉rtc_quant_scale;␉/* clock to nanos multiplier */␊ |
208 | uint64_t␉␉rtc_cyc_per_sec;␉/* processor cycles per sec */␊ |
209 | uint64_t␉␉rtc_cycle_count;␉/* clocks in 1/20th second */␊ |
210 | ␊ |
211 | static uint64_t rtc_set_cyc_per_sec(uint64_t cycles)␊ |
212 | {␊ |
213 | ␊ |
214 | ␉if (cycles > (NSEC_PER_SEC/20))␊ |
215 | ␉{␊ |
216 | ␉␉// we can use just a "fast" multiply to get nanos␊ |
217 | ␉␉rtc_quant_shift = 32;␊ |
218 | ␉␉rtc_quant_scale = create_mul_quant_GHZ(rtc_quant_shift, (uint32_t)cycles);␊ |
219 | ␉␉rtclock.timebase_const.numer = rtc_quant_scale; // timeRDTSC is 1/20␊ |
220 | ␉␉rtclock.timebase_const.denom = (uint32_t)RTC_FAST_DENOM;␊ |
221 | ␉}␊ |
222 | ␉else␊ |
223 | ␉{␊ |
224 | ␉␉rtc_quant_shift = 26;␊ |
225 | ␉␉rtc_quant_scale = create_mul_quant_GHZ(rtc_quant_shift, (uint32_t)cycles);␊ |
226 | ␉␉rtclock.timebase_const.numer = NSEC_PER_SEC/20; // timeRDTSC is 1/20␊ |
227 | ␉␉rtclock.timebase_const.denom = (uint32_t)cycles;␊ |
228 | ␉}␊ |
229 | ␉rtc_cyc_per_sec = cycles*20;␉// multiply it by 20 and we are done..␊ |
230 | ␉// BUT we also want to calculate...␊ |
231 | ␊ |
232 | ␉cycles = ((rtc_cyc_per_sec + (UI_CPUFREQ_ROUNDING_FACTOR/2))␊ |
233 | / UI_CPUFREQ_ROUNDING_FACTOR)␊ |
234 | ␉* UI_CPUFREQ_ROUNDING_FACTOR;␊ |
235 | ␊ |
236 | ␉/*␊ |
237 | ␉ * Set current measured speed.␊ |
238 | ␉ */␊ |
239 | ␉if (cycles >= 0x100000000ULL)␊ |
240 | ␉{␊ |
241 | ␉␉gPEClockFrequencyInfo.cpu_clock_rate_hz = 0xFFFFFFFFUL;␊ |
242 | ␉}␊ |
243 | ␉else␊ |
244 | ␉{␊ |
245 | ␉␉gPEClockFrequencyInfo.cpu_clock_rate_hz = (unsigned long)cycles;␊ |
246 | ␉}␊ |
247 | ␉gPEClockFrequencyInfo.cpu_frequency_hz = cycles;␊ |
248 | ␊ |
249 | ␉//printf("[RTCLOCK_1] frequency %llu (%llu) %llu\n", cycles, rtc_cyc_per_sec,timeRDTSC() * 20);␊ |
250 | ␉return(rtc_cyc_per_sec);␊ |
251 | }␊ |
252 | ␊ |
253 | // Bronya C1E fix␊ |
254 | static void post_startup_cpu_fixups(void)␊ |
255 | {␊ |
256 | ␉/*␊ |
257 | ␉ * Some AMD processors support C1E state. Entering this state will␊ |
258 | ␉ * cause the local APIC timer to stop, which we can't deal with at␊ |
259 | ␉ * this time.␊ |
260 | ␉ */␊ |
261 | ␊ |
262 | ␉uint64_t reg;␊ |
263 | ␉verbose("\tLooking to disable C1E if is already enabled by the BIOS:\n");␊ |
264 | ␉reg = rdmsr64(MSR_AMD_INT_PENDING_CMP_HALT);␊ |
265 | ␉/* Disable C1E state if it is enabled by the BIOS */␊ |
266 | ␉if ((reg >> AMD_ACTONCMPHALT_SHIFT) & AMD_ACTONCMPHALT_MASK)␊ |
267 | ␉{␊ |
268 | ␉␉reg &= ~(AMD_ACTONCMPHALT_MASK << AMD_ACTONCMPHALT_SHIFT);␊ |
269 | ␉␉wrmsr64(MSR_AMD_INT_PENDING_CMP_HALT, reg);␊ |
270 | ␉␉verbose("\tC1E disabled!\n");␊ |
271 | ␉}␊ |
272 | }␊ |
273 | ␊ |
274 | /*␊ |
275 | * Large memcpy() into MMIO space can take longer than 1 clock tick (55ms).␊ |
276 | * The timer interrupt must remain responsive when updating VRAM so␊ |
277 | * as not to miss timer interrupts during countdown().␊ |
278 | *␊ |
279 | * If interrupts are enabled, use normal memcpy.␊ |
280 | *␊ |
281 | * If interrupts are disabled, breaks memcpy down␊ |
282 | * into 128K chunks, times itself and makes a bios␊ |
283 | * real-mode call every 25 msec in order to service␊ |
284 | * pending interrupts.␊ |
285 | *␊ |
286 | * -- zenith432, May 22nd, 2016␊ |
287 | */␊ |
void* memcpy_interruptible(void* dst, const void* src, size_t len)
{
	uint64_t tscFreq, lastTsc;
	// threshold = number of TSC ticks in ~25 ms (freq / 40)
	uint32_t eflags, threshold;
	ptrdiff_t offset;
	const size_t chunk = 131072U;	// 128K

	if (len <= chunk)
	{
		/*
		 * Short memcpy - use normal.
		 */
		return memcpy(dst, src, len);
	}

	// Read EFLAGS to test the interrupt-enable bit (IF, bit 9).
	// 32-bit only (pushfl/popl), consistent with this boot environment.
	__asm__ volatile("pushfl; popl %0" : "=r"(eflags));
	if (eflags & 0x200U)
	{
		/*
		 * Interrupts are enabled - use normal memcpy.
		 */
		return memcpy(dst, src, len);
	}

	tscFreq = Platform.CPU.TSCFrequency;
	if ((uint32_t) (tscFreq >> 32))
	{
		/*
		 * If TSC Frequency >= 2 ** 32, use a default time threshold.
		 */
		threshold = (~0U) / 40U;
	}
	else if (!(uint32_t) tscFreq)
	{
		/*
		 * If early on and TSC Frequency hasn't been estimated yet,
		 * use normal memcpy.
		 */
		return memcpy(dst, src, len);
	}
	else
	{
		threshold = ((uint32_t) tscFreq) / 40U;
	}

	/*
	 * Do the work: copy in 128K chunks, and every ~25 ms of elapsed TSC
	 * time make a real-mode BIOS call so pending interrupts get serviced.
	 */
	offset = 0;
	lastTsc = rdtsc64();
	do
	{
		(void) memcpy((char*) dst + offset, (const char*) src + offset, chunk);
		offset += (ptrdiff_t) chunk;
		len -= chunk;
		if ((rdtsc64() - lastTsc) < threshold)
		{
			continue;
		}
		(void) readKeyboardStatus();	// visit real-mode
		lastTsc = rdtsc64();
	}
	while (len > chunk);
	// Copy the final partial (or exactly one last full) chunk.
	if (len)
	{
		(void) memcpy((char*) dst + offset, (const char*) src + offset, len);
	}
	return dst;
}
357 | ␊ |
358 | /*␊ |
359 | * Calculates the FSB and CPU frequencies using specific MSRs for each CPU␊ |
360 | * - multi. is read from a specific MSR. In the case of Intel, there is:␊ |
361 | *␉ a max multi. (used to calculate the FSB freq.),␊ |
362 | *␉ and a current multi. (used to calculate the CPU freq.)␊ |
363 | * - busFrequency = tscFrequency / multi␊ |
364 | * - cpuFrequency = busFrequency * multi␊ |
365 | */␊ |
366 | ␊ |
367 | /* Decimal powers: */␊ |
368 | #define kilo (1000ULL)␊ |
369 | #define Mega (kilo * kilo)␊ |
370 | #define Giga (kilo * Mega)␊ |
371 | #define Tera (kilo * Giga)␊ |
372 | #define Peta (kilo * Tera)␊ |
373 | ␊ |
374 | #define quad(hi,lo)␉(((uint64_t)(hi)) << 32 | (lo))␊ |
375 | ␊ |
376 | void get_cpuid(PlatformInfo_t *p)␊ |
377 | {␊ |
378 | ␊ |
379 | ␉char␉␉str[128];␊ |
380 | ␉uint32_t␉reg[4];␊ |
381 | ␉char␉␉*s␉␉␉= 0;␊ |
382 | ␊ |
383 | ␊ |
384 | ␉do_cpuid(0x00000000, p->CPU.CPUID[CPUID_0]); // MaxFn, Vendor␊ |
385 | ␉do_cpuid(0x00000001, p->CPU.CPUID[CPUID_1]); // Signature, stepping, features␊ |
386 | ␉do_cpuid(0x00000002, p->CPU.CPUID[CPUID_2]); // TLB/Cache/Prefetch␊ |
387 | ␊ |
388 | ␉do_cpuid(0x00000003, p->CPU.CPUID[CPUID_3]); // S/N␊ |
389 | ␉do_cpuid(0x80000000, p->CPU.CPUID[CPUID_80]); // Get the max extended cpuid␊ |
390 | ␊ |
391 | ␉if ((p->CPU.CPUID[CPUID_80][0] & 0x0000000f) >= 8)␊ |
392 | ␉{␊ |
393 | ␉␉do_cpuid(0x80000008, p->CPU.CPUID[CPUID_88]);␊ |
394 | ␉␉do_cpuid(0x80000001, p->CPU.CPUID[CPUID_81]);␊ |
395 | ␉}␊ |
396 | ␉else if ((p->CPU.CPUID[CPUID_80][0] & 0x0000000f) >= 1)␊ |
397 | ␉{␊ |
398 | ␉␉do_cpuid(0x80000001, p->CPU.CPUID[CPUID_81]);␊ |
399 | ␉}␊ |
400 | ␊ |
401 | // ==============================================================␊ |
402 | ␊ |
403 | ␉/* get BrandString (if supported) */␊ |
404 | ␉/* Copyright: from Apple's XNU cpuid.c */␊ |
405 | ␉if (p->CPU.CPUID[CPUID_80][0] > 0x80000004)␊ |
406 | ␉{␊ |
407 | ␉␉bzero(str, 128);␊ |
408 | ␉␉/*␊ |
409 | ␉␉ * The BrandString 48 bytes (max), guaranteed to␊ |
410 | ␉␉ * be NULL terminated.␊ |
411 | ␉␉ */␊ |
412 | ␉␉do_cpuid(0x80000002, reg);␊ |
413 | ␉␉memcpy(&str[0], (char *)reg, 16);␊ |
414 | ␉␉do_cpuid(0x80000003, reg);␊ |
415 | ␉␉memcpy(&str[16], (char *)reg, 16);␊ |
416 | ␉␉do_cpuid(0x80000004, reg);␊ |
417 | ␉␉memcpy(&str[32], (char *)reg, 16);␊ |
418 | ␉␉for (s = str; *s != '\0'; s++)␊ |
419 | ␉␉{␊ |
420 | ␉␉␉if (*s != ' ')␊ |
421 | ␉␉␉{␊ |
422 | ␉␉␉␉break;␊ |
423 | ␉␉␉}␊ |
424 | ␉␉}␊ |
425 | ␉␉strlcpy(p->CPU.BrandString, s, 48);␊ |
426 | ␊ |
427 | ␉␉if (!strncmp(p->CPU.BrandString, CPU_STRING_UNKNOWN, MIN(sizeof(p->CPU.BrandString), (unsigned)strlen(CPU_STRING_UNKNOWN) + 1)))␊ |
428 | ␉␉{␊ |
429 | ␉␉␉/*␊ |
430 | ␉␉␉ * This string means we have a firmware-programmable brand string,␊ |
431 | ␉␉␉ * and the firmware couldn't figure out what sort of CPU we have.␊ |
432 | ␉␉␉ */␊ |
433 | ␉␉␉p->CPU.BrandString[0] = '\0';␊ |
434 | ␉␉}␊ |
435 | ␉␉p->CPU.BrandString[47] = '\0';␊ |
436 | //␉␉DBG("\tBrandstring = %s\n", p->CPU.BrandString);␊ |
437 | ␉}␊ |
438 | ␊ |
439 | // ==============================================================␊ |
440 | ␊ |
441 | ␉switch(p->CPU.BrandString[0])␊ |
442 | ␉{␊ |
443 | ␉␉case 'A':␊ |
444 | ␉␉␉/* AMD Processors */␊ |
445 | ␉␉␉// The cache information is only in ecx and edx so only save␊ |
446 | ␉␉␉// those registers␊ |
447 | ␊ |
448 | ␉␉␉do_cpuid(5, p->CPU.CPUID[CPUID_5]); // Monitor/Mwait␊ |
449 | ␊ |
450 | ␉␉␉do_cpuid(0x80000005, p->CPU.CPUID[CPUID_85]); // TLB/Cache/Prefetch␊ |
451 | ␉␉␉do_cpuid(0x80000006, p->CPU.CPUID[CPUID_86]); // TLB/Cache/Prefetch␊ |
452 | ␉␉␉do_cpuid(0x80000008, p->CPU.CPUID[CPUID_88]);␊ |
453 | ␊ |
454 | ␉␉␉break;␊ |
455 | ␊ |
456 | ␉␉case 'G':␊ |
457 | ␉␉␉/* Intel Processors */␊ |
458 | ␉␉␉do_cpuid2(0x00000004, 0, p->CPU.CPUID[CPUID_4]); // Cache Index for Inte␊ |
459 | ␊ |
460 | ␉␉␉if (p->CPU.CPUID[CPUID_0][0] >= 0x5)␉// Monitor/Mwait␊ |
461 | ␉␉␉{␊ |
462 | ␉␉␉␉do_cpuid(5, p->CPU.CPUID[CPUID_5]);␊ |
463 | ␉␉␉}␊ |
464 | ␊ |
465 | ␉␉␉if (p->CPU.CPUID[CPUID_0][0] >= 6)␉// Thermal/Power␊ |
466 | ␉␉␉{␊ |
467 | ␉␉␉␉do_cpuid(6, p->CPU.CPUID[CPUID_6]);␊ |
468 | ␉␉␉}␊ |
469 | ␊ |
470 | ␉␉␉break;␊ |
471 | ␉}␊ |
472 | }␊ |
473 | void scan_cpu(PlatformInfo_t *p)␊ |
474 | {␊ |
475 | ␉verbose("[ CPU INFO ]\n");␊ |
476 | ␉get_cpuid(p);␊ |
477 | ␊ |
478 | ␉uint64_t␉busFCvtt2n;␊ |
479 | ␉uint64_t␉tscFCvtt2n;␊ |
480 | ␉uint64_t␉tscFreq␉␉␉= 0;␊ |
481 | ␉uint64_t␉busFrequency␉␉= 0;␊ |
482 | ␉uint64_t␉cpuFrequency␉␉= 0;␊ |
483 | ␉uint64_t␉msr␉␉␉= 0;␊ |
484 | ␉uint64_t␉flex_ratio␉␉= 0;␊ |
485 | ␉uint64_t␉cpuid_features;␊ |
486 | ␊ |
487 | ␉uint32_t␉max_ratio␉␉= 0;␊ |
488 | ␉uint32_t␉min_ratio␉␉= 0;␊ |
489 | ␉uint32_t␉reg[4];␊ |
490 | ␉uint32_t␉cores_per_package␉= 0;␊ |
491 | ␉uint32_t␉logical_per_package␉= 1;␊ |
492 | ␉uint32_t␉threads_per_core␉= 1;␊ |
493 | ␊ |
494 | ␉uint8_t␉␉bus_ratio_max␉␉= 0;␊ |
495 | ␉uint8_t␉␉bus_ratio_min␉␉= 0;␊ |
496 | ␉uint8_t␉␉currdiv␉␉␉= 0;␊ |
497 | ␉uint8_t␉␉currcoef␉␉= 0;␊ |
498 | ␉uint8_t␉␉maxdiv␉␉␉= 0;␊ |
499 | ␉uint8_t␉␉maxcoef␉␉␉= 0;␊ |
500 | ␉uint8_t␉␉pic0_mask;␊ |
501 | ␉uint8_t␉␉cpuMultN2␉␉= 0;␊ |
502 | ␊ |
503 | ␉const char␉*newratio;␊ |
504 | ␊ |
505 | ␉int␉␉len␉␉␉= 0;␊ |
506 | ␉int␉␉myfsb␉␉␉= 0;␊ |
507 | ␉int␉␉i␉␉␉= 0;␊ |
508 | ␊ |
509 | ␊ |
510 | /* http://www.flounder.com/cpuid_explorer2.htm␊ |
511 | EAX (Intel):␊ |
512 | 31 28 27 20 19 16 1514 1312 11 8 7 4 3 0␊ |
513 | +--------+----------------+--------+----+----+--------+--------+--------+␊ |
514 | |########|Extended family |Extmodel|####|type|familyid| model |stepping|␊ |
515 | +--------+----------------+--------+----+----+--------+--------+--------+␊ |
516 | ␊ |
517 | EAX (AMD):␊ |
518 | 31 28 27 20 19 16 1514 1312 11 8 7 4 3 0␊ |
519 | +--------+----------------+--------+----+----+--------+--------+--------+␊ |
520 | |########|Extended family |Extmodel|####|####|familyid| model |stepping|␊ |
521 | +--------+----------------+--------+----+----+--------+--------+--------+␊ |
522 | */␊ |
523 | ␉///////////////////-- MaxFn,Vendor --////////////////////////␊ |
524 | ␉p->CPU.Vendor␉␉= p->CPU.CPUID[CPUID_0][1];␊ |
525 | ␊ |
526 | ␉///////////////////-- Signature, stepping, features -- //////␊ |
527 | ␉cpuid_features = quad(p->CPU.CPUID[CPUID_1][ecx], p->CPU.CPUID[CPUID_1][edx]);␊ |
528 | ␉if (bit(28) & p->CPU.CPUID[CPUID_1][edx]) // HTT/Multicore␊ |
529 | ␉{␊ |
530 | ␉␉logical_per_package = bitfield(p->CPU.CPUID[CPUID_1][ebx], 23, 16);␊ |
531 | ␉}␊ |
532 | ␉else␊ |
533 | ␉{␊ |
534 | ␉␉logical_per_package = 1;␊ |
535 | ␉}␊ |
536 | ␊ |
537 | ␉p->CPU.Signature␉= p->CPU.CPUID[CPUID_1][0];␊ |
538 | ␉p->CPU.Stepping␉␉= (uint8_t)bitfield(p->CPU.CPUID[CPUID_1][0], 3, 0);␉// stepping = cpu_feat_eax & 0xF;␊ |
539 | ␉p->CPU.Model␉␉= (uint8_t)bitfield(p->CPU.CPUID[CPUID_1][0], 7, 4);␉// model = (cpu_feat_eax >> 4) & 0xF;␊ |
540 | ␉p->CPU.Family␉␉= (uint8_t)bitfield(p->CPU.CPUID[CPUID_1][0], 11, 8);␉// family = (cpu_feat_eax >> 8) & 0xF;␊ |
541 | ␉//p->CPU.Type␉␉= (uint8_t)bitfield(p->CPU.CPUID[CPUID_1][0], 13, 12);␉// type = (cpu_feat_eax >> 12) & 0x3;␊ |
542 | ␉p->CPU.ExtModel␉␉= (uint8_t)bitfield(p->CPU.CPUID[CPUID_1][0], 19, 16);␉// ext_model = (cpu_feat_eax >> 16) & 0xF;␊ |
543 | ␉p->CPU.ExtFamily␉= (uint8_t)bitfield(p->CPU.CPUID[CPUID_1][0], 27, 20);␉// ext_family = (cpu_feat_eax >> 20) & 0xFF;␊ |
544 | ␊ |
545 | ␉if (p->CPU.Family == 0x0f)␊ |
546 | ␉{␊ |
547 | ␉␉p->CPU.Family += p->CPU.ExtFamily;␊ |
548 | ␉}␊ |
549 | ␊ |
550 | ␉if (p->CPU.Family == 0x0f || p->CPU.Family == 0x06)␊ |
551 | ␉{␊ |
552 | ␉␉p->CPU.Model += (p->CPU.ExtModel << 4);␊ |
553 | ␉}␊ |
554 | ␊ |
555 | ␉switch (p->CPU.Vendor)␊ |
556 | ␉{␊ |
557 | ␉␉case CPUID_VENDOR_INTEL:␊ |
558 | ␉␉{␊ |
559 | ␉␉␉/* Based on Apple's XNU cpuid.c - Deterministic cache parameters */␊ |
560 | ␉␉␉if ((p->CPU.CPUID[CPUID_0][eax] > 3) && (p->CPU.CPUID[CPUID_0][eax] < 0x80000000))␊ |
561 | ␉␉␉{␊ |
562 | ␉␉␉␉for (i = 0; i < 0xFF; i++) // safe loop␊ |
563 | ␉␉␉␉{␊ |
564 | ␉␉␉␉␉do_cpuid2(0x00000004, i, reg); // AX=4: Fn, CX=i: cache index␊ |
565 | ␉␉␉␉␉if (bitfield(reg[eax], 4, 0) == 0)␊ |
566 | ␉␉␉␉␉{␊ |
567 | ␉␉␉␉␉␉break;␊ |
568 | ␉␉␉␉␉}␊ |
569 | ␉␉␉␉␉cores_per_package = bitfield(reg[eax], 31, 26) + 1;␊ |
570 | ␉␉␉␉}␊ |
571 | ␉␉␉}␊ |
572 | ␊ |
573 | ␉␉␉if (i > 0)␊ |
574 | ␉␉␉{␊ |
575 | ␉␉␉␉cores_per_package = bitfield(p->CPU.CPUID[CPUID_4][eax], 31, 26) + 1; // i = cache index␊ |
576 | ␉␉␉␉threads_per_core = bitfield(p->CPU.CPUID[CPUID_4][eax], 25, 14) + 1;␊ |
577 | ␉␉␉}␊ |
578 | ␊ |
579 | ␉␉␉if (cores_per_package == 0)␊ |
580 | ␉␉␉{␊ |
581 | ␉␉␉␉cores_per_package = 1;␊ |
582 | ␉␉␉}␊ |
583 | ␊ |
584 | ␉␉␉switch (p->CPU.Model)␊ |
585 | ␉␉␉{␊ |
586 | ␉␉␉␉case CPUID_MODEL_NEHALEM: // Intel Core i7 LGA1366 (45nm)␊ |
587 | ␉␉␉␉case CPUID_MODEL_FIELDS: // Intel Core i5, i7 LGA1156 (45nm)␊ |
588 | ␉␉␉␉case CPUID_MODEL_CLARKDALE: // Intel Core i3, i5, i7 LGA1156 (32nm)␊ |
589 | ␉␉␉␉case CPUID_MODEL_NEHALEM_EX:␊ |
590 | ␉␉␉␉case CPUID_MODEL_JAKETOWN:␊ |
591 | ␉␉␉␉case CPUID_MODEL_SANDYBRIDGE:␊ |
592 | ␉␉␉␉case CPUID_MODEL_IVYBRIDGE:␊ |
593 | ␉␉␉␉case CPUID_MODEL_HASWELL_U5:␊ |
594 | ␉␉␉␉case CPUID_MODEL_HASWELL:␊ |
595 | ␉␉␉␉case CPUID_MODEL_HASWELL_SVR:␊ |
596 | ␉␉␉␉//case CPUID_MODEL_HASWELL_H:␊ |
597 | ␉␉␉␉case CPUID_MODEL_HASWELL_ULT:␊ |
598 | ␉␉␉␉case CPUID_MODEL_HASWELL_ULX:␊ |
599 | ␉␉␉␉case CPUID_MODEL_BROADWELL_HQ:␊ |
600 | ␉␉␉␉case CPUID_MODEL_BRODWELL_SVR:␊ |
601 | ␉␉␉␉case CPUID_MODEL_SKYLAKE_S:␊ |
602 | ␉␉␉␉//case CPUID_MODEL_:␊ |
603 | ␉␉␉␉␉msr = rdmsr64(MSR_CORE_THREAD_COUNT); // 0x35␊ |
604 | ␉␉␉␉␉p->CPU.NoCores␉␉= (uint32_t)bitfield((uint32_t)msr, 31, 16);␊ |
605 | ␉␉␉␉␉p->CPU.NoThreads␉= (uint32_t)bitfield((uint32_t)msr, 15, 0);␊ |
606 | ␉␉␉␉␉break;␊ |
607 | ␊ |
608 | ␉␉␉␉case CPUID_MODEL_DALES:␊ |
609 | ␉␉␉␉case CPUID_MODEL_WESTMERE: // Intel Core i7 LGA1366 (32nm) 6 Core␊ |
610 | ␉␉␉␉case CPUID_MODEL_WESTMERE_EX:␊ |
611 | ␉␉␉␉␉msr = rdmsr64(MSR_CORE_THREAD_COUNT);␊ |
612 | ␉␉␉␉␉p->CPU.NoCores␉␉= (uint32_t)bitfield((uint32_t)msr, 19, 16);␊ |
613 | ␉␉␉␉␉p->CPU.NoThreads␉= (uint32_t)bitfield((uint32_t)msr, 15, 0);␊ |
614 | ␉␉␉␉␉break;␊ |
615 | ␉␉␉␉case CPUID_MODEL_ATOM_3700:␊ |
616 | ␉␉␉␉␉p->CPU.NoCores␉␉= 4;␊ |
617 | ␉␉␉␉␉p->CPU.NoThreads␉= 4;␊ |
618 | ␉␉␉␉␉break;␊ |
619 | ␉␉␉␉case CPUID_MODEL_ATOM:␊ |
620 | ␉␉␉␉␉p->CPU.NoCores␉␉= 2;␊ |
621 | ␉␉␉␉␉p->CPU.NoThreads␉= 2;␊ |
622 | ␉␉␉␉␉break;␊ |
623 | ␉␉␉␉default:␊ |
624 | ␉␉␉␉␉p->CPU.NoCores␉␉= 0;␊ |
625 | ␉␉␉␉␉break;␊ |
626 | ␉␉␉}␊ |
627 | ␊ |
628 | ␉␉␉if (p->CPU.NoCores == 0)␊ |
629 | ␉␉␉{␊ |
630 | ␉␉␉␉p->CPU.NoCores␉␉= cores_per_package;␊ |
631 | ␉␉␉␉p->CPU.NoThreads␉= logical_per_package;␊ |
632 | ␉␉␉}␊ |
633 | ␊ |
634 | ␉␉␉// MSR is *NOT* available on the Intel Atom CPU␊ |
635 | ␉␉␉// workaround for N270. I don't know why it detected wrong␊ |
636 | ␉␉␉if ((p->CPU.Model == CPUID_MODEL_ATOM) && (strstr(p->CPU.BrandString, "270")))␊ |
637 | ␉␉␉{␊ |
638 | ␉␉␉␉p->CPU.NoCores␉␉= 1;␊ |
639 | ␉␉␉␉p->CPU.NoThreads␉= 2;␊ |
640 | ␉␉␉}␊ |
641 | ␊ |
642 | ␊ |
643 | ␉␉␉// workaround for Xeon Harpertown and Yorkfield␊ |
644 | ␉␉␉if ((p->CPU.Model == CPUID_MODEL_PENRYN) &&␊ |
645 | ␉␉␉␉(p->CPU.NoCores␉== 0))␊ |
646 | ␉␉␉{␊ |
647 | ␉␉␉␉if ((strstr(p->CPU.BrandString, "X54")) ||␊ |
648 | ␉␉␉␉␉(strstr(p->CPU.BrandString, "E54")) ||␊ |
649 | ␉␉␉␉␉(strstr(p->CPU.BrandString, "W35")) ||␊ |
650 | ␉␉␉␉␉(strstr(p->CPU.BrandString, "X34")) ||␊ |
651 | ␉␉␉␉␉(strstr(p->CPU.BrandString, "X33")) ||␊ |
652 | ␉␉␉␉␉(strstr(p->CPU.BrandString, "L33")) ||␊ |
653 | ␉␉␉␉␉(strstr(p->CPU.BrandString, "X32")) ||␊ |
654 | ␉␉␉␉␉(strstr(p->CPU.BrandString, "L3426")) ||␊ |
655 | ␉␉␉␉␉(strstr(p->CPU.BrandString, "L54")))␊ |
656 | ␉␉␉␉{␊ |
657 | ␉␉␉␉␉p->CPU.NoCores␉␉= 4;␊ |
658 | ␉␉␉␉␉p->CPU.NoThreads␉= 4;␊ |
659 | ␉␉␉␉} else if (strstr(p->CPU.BrandString, "W36")) {␊ |
660 | ␉␉␉␉␉p->CPU.NoCores␉␉= 6;␊ |
661 | ␉␉␉␉␉p->CPU.NoThreads␉= 6;␊ |
662 | ␉␉␉␉} else { //other Penryn and Wolfdale␊ |
663 | ␉␉␉␉␉p->CPU.NoCores␉␉= 0;␊ |
664 | ␉␉␉␉␉p->CPU.NoThreads␉= 0;␊ |
665 | ␉␉␉␉}␊ |
666 | ␉␉␉}␊ |
667 | ␊ |
668 | ␉␉␉// workaround for Quad␊ |
669 | ␉␉␉if ( strstr(p->CPU.BrandString, "Quad") )␊ |
670 | ␉␉␉{␊ |
671 | ␉␉␉␉p->CPU.NoCores␉␉= 4;␊ |
672 | ␉␉␉␉p->CPU.NoThreads␉= 4;␊ |
673 | ␉␉␉}␊ |
674 | ␉␉}␊ |
675 | ␊ |
676 | ␉␉break;␊ |
677 | ␊ |
678 | ␉␉case CPUID_VENDOR_AMD:␊ |
679 | ␉␉{␊ |
680 | ␉␉␉post_startup_cpu_fixups();␊ |
681 | ␉␉␉cores_per_package = bitfield(p->CPU.CPUID[CPUID_88][ecx], 7, 0) + 1;␊ |
682 | ␉␉␉threads_per_core = cores_per_package;␊ |
683 | ␊ |
684 | ␉␉␉if (cores_per_package == 0)␊ |
685 | ␉␉␉{␊ |
686 | ␉␉␉␉cores_per_package = 1;␊ |
687 | ␉␉␉}␊ |
688 | ␊ |
689 | ␉␉␉p->CPU.NoCores␉␉= cores_per_package;␊ |
690 | ␉␉␉p->CPU.NoThreads␉= logical_per_package;␊ |
691 | ␊ |
692 | ␉␉␉if (p->CPU.NoCores == 0)␊ |
693 | ␉␉␉{␊ |
694 | ␉␉␉␉p->CPU.NoCores = 1;␊ |
695 | ␉␉␉␉p->CPU.NoThreads␉= 1;␊ |
696 | ␉␉␉}␊ |
697 | ␉␉}␊ |
698 | ␉␉break;␊ |
699 | ␊ |
700 | ␉␉default :␊ |
701 | ␉␉␉stop("Unsupported CPU detected! System halted.");␊ |
702 | ␉}␊ |
703 | ␊ |
704 | ␉/* setup features */␊ |
705 | ␉if ((bit(23) & p->CPU.CPUID[CPUID_1][3]) != 0)␊ |
706 | ␉{␊ |
707 | ␉␉p->CPU.Features |= CPU_FEATURE_MMX;␊ |
708 | ␉}␊ |
709 | ␊ |
710 | ␉if ((bit(25) & p->CPU.CPUID[CPUID_1][3]) != 0)␊ |
711 | ␉{␊ |
712 | ␉␉p->CPU.Features |= CPU_FEATURE_SSE;␊ |
713 | ␉}␊ |
714 | ␊ |
715 | ␉if ((bit(26) & p->CPU.CPUID[CPUID_1][3]) != 0)␊ |
716 | ␉{␊ |
717 | ␉␉p->CPU.Features |= CPU_FEATURE_SSE2;␊ |
718 | ␉}␊ |
719 | ␊ |
720 | ␉if ((bit(0) & p->CPU.CPUID[CPUID_1][2]) != 0)␊ |
721 | ␉{␊ |
722 | ␉␉p->CPU.Features |= CPU_FEATURE_SSE3;␊ |
723 | ␉}␊ |
724 | ␊ |
725 | ␉if ((bit(19) & p->CPU.CPUID[CPUID_1][2]) != 0)␊ |
726 | ␉{␊ |
727 | ␉␉p->CPU.Features |= CPU_FEATURE_SSE41;␊ |
728 | ␉}␊ |
729 | ␊ |
730 | ␉if ((bit(20) & p->CPU.CPUID[CPUID_1][2]) != 0)␊ |
731 | ␉{␊ |
732 | ␉␉p->CPU.Features |= CPU_FEATURE_SSE42;␊ |
733 | ␉}␊ |
734 | ␊ |
735 | ␉if ((bit(29) & p->CPU.CPUID[CPUID_81][3]) != 0)␊ |
736 | ␉{␊ |
737 | ␉␉p->CPU.Features |= CPU_FEATURE_EM64T;␊ |
738 | ␉}␊ |
739 | ␊ |
740 | ␉if ((bit(5) & p->CPU.CPUID[CPUID_1][3]) != 0)␊ |
741 | ␉{␊ |
742 | ␉␉p->CPU.Features |= CPU_FEATURE_MSR;␊ |
743 | ␉}␊ |
744 | ␊ |
745 | ␉if ((p->CPU.NoThreads > p->CPU.NoCores))␊ |
746 | ␉{␊ |
747 | ␉␉p->CPU.Features |= CPU_FEATURE_HTT;␊ |
748 | ␉}␊ |
749 | ␊ |
750 | ␉pic0_mask = inb(0x21U);␊ |
751 | ␉outb(0x21U, 0xFFU); // mask PIC0 interrupts for duration of timing tests␊ |
752 | ␊ |
753 | ␉uint64_t cycles;␊ |
754 | ␉cycles = timeRDTSC();␊ |
755 | ␉tscFreq = rtc_set_cyc_per_sec(cycles);␊ |
756 | ␉DBG("cpu freq classic = 0x%016llx\n", tscFreq);␊ |
757 | ␉// if usual method failed␊ |
758 | ␉if ( tscFreq < 1000 )␉//TEST␊ |
759 | ␉{␊ |
760 | ␉␉tscFreq = measure_tsc_frequency();//timeRDTSC() * 20;//measure_tsc_frequency();␊ |
761 | ␉␉// DBG("cpu freq timeRDTSC = 0x%016llx\n", tscFrequency);␊ |
762 | ␉}␊ |
763 | ␊ |
764 | ␉if (p->CPU.Vendor==CPUID_VENDOR_INTEL && ((p->CPU.Family == 0x06 && p->CPU.Model >= 0x0c) || (p->CPU.Family == 0x0f && p->CPU.Model >= 0x03)))␊ |
765 | ␉{␊ |
766 | ␉␉int intelCPU = p->CPU.Model;␊ |
767 | ␉␉if (p->CPU.Family == 0x06)␊ |
768 | ␉␉{␊ |
769 | ␉␉␉/* Nehalem CPU model */␊ |
770 | ␉␉␉switch (p->CPU.Model)␊ |
771 | ␉␉␉{␊ |
772 | ␉␉␉␉case CPUID_MODEL_NEHALEM:␊ |
773 | ␉␉␉␉case CPUID_MODEL_FIELDS:␊ |
774 | ␉␉␉␉case CPUID_MODEL_CLARKDALE:␊ |
775 | ␉␉␉␉case CPUID_MODEL_DALES:␊ |
776 | ␉␉␉␉case CPUID_MODEL_WESTMERE:␊ |
777 | ␉␉␉␉case CPUID_MODEL_NEHALEM_EX:␊ |
778 | ␉␉␉␉case CPUID_MODEL_WESTMERE_EX:␊ |
779 | /* --------------------------------------------------------- */␊ |
780 | ␉␉␉␉case CPUID_MODEL_SANDYBRIDGE:␊ |
781 | ␉␉␉␉case CPUID_MODEL_JAKETOWN:␊ |
782 | ␉␉␉␉case CPUID_MODEL_IVYBRIDGE_XEON:␊ |
783 | ␉␉␉␉case CPUID_MODEL_IVYBRIDGE:␊ |
784 | ␉␉␉␉case CPUID_MODEL_ATOM_3700:␊ |
785 | ␉␉␉␉case CPUID_MODEL_HASWELL:␊ |
786 | ␉␉␉␉case CPUID_MODEL_HASWELL_U5:␊ |
787 | ␉␉␉␉case CPUID_MODEL_HASWELL_SVR:␊ |
788 | ␊ |
789 | ␉␉␉␉case CPUID_MODEL_HASWELL_ULT:␊ |
790 | ␉␉␉␉case CPUID_MODEL_HASWELL_ULX:␊ |
791 | ␉␉␉␉case CPUID_MODEL_BROADWELL_HQ:␊ |
792 | ␉␉␉␉case CPUID_MODEL_SKYLAKE_S:␊ |
793 | /* --------------------------------------------------------- */␊ |
794 | ␉␉␉␉␉msr = rdmsr64(MSR_PLATFORM_INFO);␊ |
795 | ␉␉␉␉␉DBG("msr(%d): platform_info %08x\n", __LINE__, bitfield(msr, 31, 0));␊ |
796 | ␉␉␉␉␉bus_ratio_max = bitfield(msr, 15, 8);␊ |
797 | ␉␉␉␉␉bus_ratio_min = bitfield(msr, 47, 40); //valv: not sure about this one (Remarq.1)␊ |
798 | ␉␉␉␉␉msr = rdmsr64(MSR_FLEX_RATIO);␊ |
799 | ␉␉␉␉␉DBG("msr(%d): flex_ratio %08x\n", __LINE__, bitfield(msr, 31, 0));␊ |
800 | ␉␉␉␉␉if (bitfield(msr, 16, 16))␊ |
801 | ␉␉␉␉␉{␊ |
802 | ␉␉␉␉␉␉flex_ratio = bitfield(msr, 15, 8);␊ |
803 | ␉␉␉␉␉␉// bcc9: at least on the gigabyte h67ma-ud2h,␊ |
804 | ␉␉␉␉␉␉// where the cpu multipler can't be changed to␊ |
805 | ␉␉␉␉␉␉// allow overclocking, the flex_ratio msr has unexpected (to OSX)␊ |
806 | ␉␉␉␉␉␉// contents.␉These contents cause mach_kernel to␊ |
807 | ␉␉␉␉␉␉// fail to compute the bus ratio correctly, instead␊ |
808 | ␉␉␉␉␉␉// causing the system to crash since tscGranularity␊ |
809 | ␉␉␉␉␉␉// is inadvertently set to 0.␊ |
810 | ␊ |
811 | ␉␉␉␉␉␉if (flex_ratio == 0)␊ |
812 | ␉␉␉␉␉␉{␊ |
813 | ␉␉␉␉␉␉␉// Clear bit 16 (evidently the presence bit)␊ |
814 | ␉␉␉␉␉␉␉wrmsr64(MSR_FLEX_RATIO, (msr & 0xFFFFFFFFFFFEFFFFULL));␊ |
815 | ␉␉␉␉␉␉␉msr = rdmsr64(MSR_FLEX_RATIO);␊ |
816 | ␉␉␉␉␉␉␉DBG("CPU: Unusable flex ratio detected. Patched MSR now %08x\n", bitfield(msr, 31, 0));␊ |
817 | ␉␉␉␉␉␉}␊ |
818 | ␉␉␉␉␉␉else␊ |
819 | ␉␉␉␉␉␉{␊ |
820 | ␉␉␉␉␉␉␉if (bus_ratio_max > flex_ratio)␊ |
821 | ␉␉␉␉␉␉␉{␊ |
822 | ␉␉␉␉␉␉␉␉bus_ratio_max = flex_ratio;␊ |
823 | ␉␉␉␉␉␉␉}␊ |
824 | ␉␉␉␉␉␉}␊ |
825 | ␉␉␉␉␉}␊ |
826 | ␊ |
827 | ␉␉␉␉␉if (bus_ratio_max)␊ |
828 | ␉␉␉␉␉{␊ |
829 | ␉␉␉␉␉␉busFrequency = (tscFreq / bus_ratio_max);␊ |
830 | ␉␉␉␉␉}␊ |
831 | ␊ |
832 | ␉␉␉␉␉//valv: Turbo Ratio Limit␊ |
833 | ␉␉␉␉␉if ((intelCPU != 0x2e) && (intelCPU != 0x2f))␊ |
834 | ␉␉␉␉␉{␊ |
835 | ␉␉␉␉␉␉msr = rdmsr64(MSR_TURBO_RATIO_LIMIT);␊ |
836 | ␊ |
837 | ␉␉␉␉␉␉cpuFrequency = bus_ratio_max * busFrequency;␊ |
838 | ␉␉␉␉␉␉max_ratio = bus_ratio_max * 10;␊ |
839 | ␉␉␉␉␉}␊ |
840 | ␉␉␉␉␉else␊ |
841 | ␉␉␉␉␉{␊ |
842 | ␉␉␉␉␉␉cpuFrequency = tscFreq;␊ |
843 | ␉␉␉␉␉}␊ |
844 | ␊ |
					// Optional user override of the max multiplier via the kbusratio
					// boot key. Ratios remain scaled x10 throughout.
					if ((getValueForKey(kbusratio, &newratio, &len, &bootInfo->chameleonConfig)) && (len <= 4))
					{
						max_ratio = atoi(newratio);	// atoi() stops at a '.' — only the integer part is parsed
						max_ratio = (max_ratio * 10);
						if (len >= 3)
						{
							// presumably a value like "8.5": len >= 3 is taken to mean a
							// ".5" suffix follows, hence the +5 — TODO confirm key format
							max_ratio = (max_ratio + 5);
						}

						verbose("\tBus-Ratio: min=%d, max=%s\n", bus_ratio_min, newratio);

						// extreme overclockers may love 320 ;)
						if ((max_ratio >= min_ratio) && (max_ratio <= 320))
						{
							cpuFrequency = (busFrequency * max_ratio) / 10;
							if (len >= 3)
							{
								maxdiv = 1;	// half multiplier in effect
							}
							else
							{
								maxdiv = 0;
							}
						}
						else
						{
							// Out-of-range override: fall back to the hardware maximum.
							max_ratio = (bus_ratio_max * 10);
						}
					}
					//valv: to be uncommented if Remarq.1 didn't stick
					//if (bus_ratio_max > 0) bus_ratio = flex_ratio;
					p->CPU.MaxRatio = max_ratio;
					p->CPU.MinRatio = min_ratio;

				myfsb = busFrequency / 1000000;	// Hz -> MHz
				verbose("\tSticking with [BCLK: %dMhz, Bus-Ratio: %d]\n", myfsb, max_ratio/10); // Bungo: fixed wrong Bus-Ratio readout
				currcoef = bus_ratio_max;

				break;
884 | ␊ |
			default:
				// Older Intel models: derive ratios from IA32_PERF_STATUS.
				msr = rdmsr64(MSR_IA32_PERF_STATUS);
				DBG("msr(%d): ia32_perf_stat 0x%08x\n", __LINE__, bitfield(msr, 31, 0));
				currcoef = bitfield(msr, 12, 8); // Bungo: reverted to 2263 state because of wrong old CPUs freq. calculating
				// Non-integer bus ratio for the max-multi
				maxdiv = bitfield(msr, 46, 46);
				// Non-integer bus ratio for the current-multi (undocumented)
				currdiv = bitfield(msr, 14, 14);

				// This will always be model >= 3
				if ((p->CPU.Family == 0x06 && p->CPU.Model >= 0x0e) || (p->CPU.Family == 0x0f))
				{
					/* On these models, maxcoef defines TSC freq */
					maxcoef = bitfield(msr, 44, 40);
				}
				else
				{
					// On lower models, currcoef defines TSC freq
					// XXX
					maxcoef = currcoef;
				}

				if (!currcoef)
				{
					currcoef = maxcoef;	// fall back when the current ratio reads as zero
				}

				if (maxcoef)
				{
					// A set "div" bit means an extra half multiplier (n + 0.5),
					// computed as (2n + 1) / 2 to stay in integer arithmetic.
					if (maxdiv)
					{
						busFrequency = ((tscFreq * 2) / ((maxcoef * 2) + 1));
					}
					else
					{
						busFrequency = (tscFreq / maxcoef);
					}

					if (currdiv)
					{
						cpuFrequency = (busFrequency * ((currcoef * 2) + 1) / 2);
					}
					else
					{
						cpuFrequency = (busFrequency * currcoef);
					}

					DBG("max: %d%s current: %d%s\n", maxcoef, maxdiv ? ".5" : "",currcoef, currdiv ? ".5" : "");
				}
				break;
			}
		}
		// Mobile CPU: IA32_PLATFORM_ID bit 28 flags a mobile part.
		if (rdmsr64(MSR_IA32_PLATFORM_ID) & (1<<28))
		{
			p->CPU.Features |= CPU_FEATURE_MOBILE;
		}
	}
943 | ␊ |
	else if (p->CPU.Vendor==CPUID_VENDOR_AMD)
	{
		switch(p->CPU.Family)
		{
			case 0xF: /* K8 */
			{
				uint64_t fidvid = 0;
				uint64_t cpuMult;
				uint64_t fid;

				// FID/VID status MSR: bits 5:0 hold the current frequency ID.
				fidvid = rdmsr64(K8_FIDVID_STATUS);
				fid = bitfield(fidvid, 5, 0);

				// Multiplier = (FID + 8) / 2; integer division drops the half
				// step, which is carried separately via currdiv below.
				cpuMult = (fid + 8) / 2;
				currcoef = cpuMult;

				// MSR bit 0 treated as the half-multiplier flag.
				cpuMultN2 = (fidvid & (uint64_t)bit(0));
				currdiv = cpuMultN2;
				/****** Addon END ******/
			}
				break;
965 | ␊ |
966 | ␉␉␉case 0x10: /*** AMD Family 10h ***/␊ |
967 | ␉␉␉{␊ |
968 | ␉␉␉␉uint64_t cofvid = 0;␊ |
969 | ␉␉␉␉uint64_t cpuMult;␊ |
970 | ␉␉␉␉uint64_t divisor = 0;␊ |
971 | ␉␉␉␉uint64_t did;␊ |
972 | ␉␉␉␉uint64_t fid;␊ |
973 | ␊ |
974 | ␉␉␉␉cofvid = rdmsr64(K10_COFVID_STATUS);␊ |
975 | ␉␉␉␉did = bitfield(cofvid, 8, 6);␊ |
976 | ␉␉␉␉fid = bitfield(cofvid, 5, 0);␊ |
977 | ␉␉␉␉if (did == 0) divisor = 2;␊ |
978 | ␉␉␉␉else if (did == 1) divisor = 4;␊ |
979 | ␉␉␉␉else if (did == 2) divisor = 8;␊ |
980 | ␉␉␉␉else if (did == 3) divisor = 16;␊ |
981 | ␉␉␉␉else if (did == 4) divisor = 32;␊ |
982 | ␊ |
983 | ␉␉␉␉cpuMult = (fid + 16) / divisor;␊ |
984 | ␉␉␉␉currcoef = cpuMult;␊ |
985 | ␊ |
986 | ␉␉␉␉cpuMultN2 = (cofvid & (uint64_t)bit(0));␊ |
987 | ␉␉␉␉currdiv = cpuMultN2;␊ |
988 | ␊ |
989 | ␉␉␉␉/****** Addon END ******/␊ |
990 | ␉␉␉}␊ |
991 | ␉␉␉break;␊ |
992 | ␊ |
993 | ␉␉␉case 0x11: /*** AMD Family 11h ***/␊ |
994 | ␉␉␉{␊ |
995 | ␉␉␉␉uint64_t cofvid = 0;␊ |
996 | ␉␉␉␉uint64_t cpuMult;␊ |
997 | ␉␉␉␉uint64_t divisor = 0;␊ |
998 | ␉␉␉␉uint64_t did;␊ |
999 | ␉␉␉␉uint64_t fid;␊ |
1000 | ␊ |
1001 | ␉␉␉␉cofvid = rdmsr64(K10_COFVID_STATUS);␊ |
1002 | ␉␉␉␉did = bitfield(cofvid, 8, 6);␊ |
1003 | ␉␉␉␉fid = bitfield(cofvid, 5, 0);␊ |
1004 | ␉␉␉␉if (did == 0) divisor = 2;␊ |
1005 | ␉␉␉␉else if (did == 1) divisor = 4;␊ |
1006 | ␉␉␉␉else if (did == 2) divisor = 8;␊ |
1007 | ␉␉␉␉else if (did == 3) divisor = 16;␊ |
1008 | ␉␉␉␉else if (did == 4) divisor = 32;␊ |
1009 | ␊ |
1010 | ␉␉␉␉cpuMult = (fid + 8) / divisor;␊ |
1011 | ␉␉␉␉currcoef = cpuMult;␊ |
1012 | ␊ |
1013 | ␉␉␉␉cpuMultN2 = (cofvid & (uint64_t)bit(0));␊ |
1014 | ␉␉␉␉currdiv = cpuMultN2;␊ |
1015 | ␊ |
1016 | ␉␉␉␉/****** Addon END ******/␊ |
1017 | ␉␉␉}␊ |
1018 | break;␊ |
1019 | ␊ |
1020 | ␉␉␉case 0x12: /*** AMD Family 12h ***/␊ |
1021 | ␉␉␉{␊ |
1022 | ␉␉␉␉// 8:4 CpuFid: current CPU core frequency ID␊ |
1023 | ␉␉␉␉// 3:0 CpuDid: current CPU core divisor ID␊ |
1024 | ␉␉␉␉uint64_t prfsts,CpuFid,CpuDid;␊ |
1025 | ␉␉␉␉prfsts = rdmsr64(K10_COFVID_STATUS);␊ |
1026 | ␊ |
1027 | ␉␉␉␉CpuDid = bitfield(prfsts, 3, 0) ;␊ |
1028 | ␉␉␉␉CpuFid = bitfield(prfsts, 8, 4) ;␊ |
1029 | ␉␉␉␉uint64_t divisor;␊ |
1030 | ␉␉␉␉switch (CpuDid)␊ |
1031 | ␉␉␉␉{␊ |
1032 | ␉␉␉␉␉case 0: divisor = 1; break;␊ |
1033 | ␉␉␉␉␉case 1: divisor = (3/2); break;␊ |
1034 | ␉␉␉␉␉case 2: divisor = 2; break;␊ |
1035 | ␉␉␉␉␉case 3: divisor = 3; break;␊ |
1036 | ␉␉␉␉␉case 4: divisor = 4; break;␊ |
1037 | ␉␉␉␉␉case 5: divisor = 6; break;␊ |
1038 | ␉␉␉␉␉case 6: divisor = 8; break;␊ |
1039 | ␉␉␉␉␉case 7: divisor = 12; break;␊ |
1040 | ␉␉␉␉␉case 8: divisor = 16; break;␊ |
1041 | ␉␉␉␉␉default: divisor = 1; break;␊ |
1042 | ␉␉␉␉}␊ |
1043 | ␉␉␉␉currcoef = (CpuFid + 0x10) / divisor;␊ |
1044 | ␊ |
1045 | ␉␉␉␉cpuMultN2 = (prfsts & (uint64_t)bit(0));␊ |
1046 | ␉␉␉␉currdiv = cpuMultN2;␊ |
1047 | ␊ |
1048 | ␉␉␉}␊ |
1049 | ␉␉␉␉break;␊ |
1050 | ␊ |
1051 | ␉␉␉case 0x14: /* K14 */␊ |
1052 | ␊ |
1053 | ␉␉␉{␊ |
1054 | ␉␉␉␉// 8:4: current CPU core divisor ID most significant digit␊ |
1055 | ␉␉␉␉// 3:0: current CPU core divisor ID least significant digit␊ |
1056 | ␉␉␉␉uint64_t prfsts;␊ |
1057 | ␉␉␉␉prfsts = rdmsr64(K10_COFVID_STATUS);␊ |
1058 | ␊ |
1059 | ␉␉␉␉uint64_t CpuDidMSD,CpuDidLSD;␊ |
1060 | ␉␉␉␉CpuDidMSD = bitfield(prfsts, 8, 4) ;␊ |
1061 | ␉␉␉␉CpuDidLSD = bitfield(prfsts, 3, 0) ;␊ |
1062 | ␊ |
1063 | ␉␉␉␉uint64_t frequencyId = 0x10;␊ |
1064 | ␉␉␉␉currcoef = (frequencyId + 0x10) /␊ |
1065 | ␉␉␉␉␉(CpuDidMSD + (CpuDidLSD * 0.25) + 1);␊ |
1066 | ␉␉␉␉currdiv = ((CpuDidMSD) + 1) << 2;␊ |
1067 | ␉␉␉␉currdiv += bitfield(msr, 3, 0);␊ |
1068 | ␊ |
1069 | ␉␉␉␉cpuMultN2 = (prfsts & (uint64_t)bit(0));␊ |
1070 | ␉␉␉␉currdiv = cpuMultN2;␊ |
1071 | ␉␉␉}␊ |
1072 | ␊ |
1073 | ␉␉␉␉break;␊ |
1074 | ␊ |
1075 | ␉␉␉case 0x15: /*** AMD Family 15h ***/␊ |
1076 | ␉␉␉case 0x06: /*** AMD Family 06h ***/␊ |
1077 | ␉␉␉{␊ |
1078 | ␊ |
1079 | ␉␉␉␉uint64_t cofvid = 0;␊ |
1080 | ␉␉␉␉uint64_t cpuMult;␊ |
1081 | ␉␉␉␉uint64_t divisor = 0;␊ |
1082 | ␉␉␉␉uint64_t did;␊ |
1083 | ␉␉␉␉uint64_t fid;␊ |
1084 | ␊ |
1085 | ␉␉␉␉cofvid = rdmsr64(K10_COFVID_STATUS);␊ |
1086 | ␉␉␉␉did = bitfield(cofvid, 8, 6);␊ |
1087 | ␉␉␉␉fid = bitfield(cofvid, 5, 0);␊ |
1088 | ␉␉␉␉if (did == 0) divisor = 2;␊ |
1089 | ␉␉␉␉else if (did == 1) divisor = 4;␊ |
1090 | ␉␉␉␉else if (did == 2) divisor = 8;␊ |
1091 | ␉␉␉␉else if (did == 3) divisor = 16;␊ |
1092 | ␉␉␉␉else if (did == 4) divisor = 32;␊ |
1093 | ␊ |
1094 | ␉␉␉␉cpuMult = (fid + 16) / divisor;␊ |
1095 | ␉␉␉␉currcoef = cpuMult;␊ |
1096 | ␊ |
1097 | ␉␉␉␉cpuMultN2 = (cofvid & (uint64_t)bit(0));␊ |
1098 | ␉␉␉␉currdiv = cpuMultN2;␊ |
1099 | ␉␉␉}␊ |
1100 | ␉␉␉␉break;␊ |
1101 | ␊ |
1102 | ␉␉␉case 0x16: /*** AMD Family 16h kabini ***/␊ |
1103 | ␉␉␉{␊ |
1104 | ␉␉␉␉uint64_t cofvid = 0;␊ |
1105 | ␉␉␉␉uint64_t cpuMult;␊ |
1106 | ␉␉␉␉uint64_t divisor = 0;␊ |
1107 | ␉␉␉␉uint64_t did;␊ |
1108 | ␉␉␉␉uint64_t fid;␊ |
1109 | ␊ |
1110 | ␉␉␉␉cofvid = rdmsr64(K10_COFVID_STATUS);␊ |
1111 | ␉␉␉␉did = bitfield(cofvid, 8, 6);␊ |
1112 | ␉␉␉␉fid = bitfield(cofvid, 5, 0);␊ |
1113 | ␉␉␉␉if (did == 0) divisor = 1;␊ |
1114 | ␉␉␉␉else if (did == 1) divisor = 2;␊ |
1115 | ␉␉␉␉else if (did == 2) divisor = 4;␊ |
1116 | ␉␉␉␉else if (did == 3) divisor = 8;␊ |
1117 | ␉␉␉␉else if (did == 4) divisor = 16;␊ |
1118 | ␊ |
1119 | ␉␉␉␉cpuMult = (fid + 16) / divisor;␊ |
1120 | ␉␉␉␉currcoef = cpuMult;␊ |
1121 | ␊ |
1122 | ␉␉␉␉cpuMultN2 = (cofvid & (uint64_t)bit(0));␊ |
1123 | ␉␉␉␉currdiv = cpuMultN2;␊ |
1124 | ␉␉␉␉/****** Addon END ******/␊ |
1125 | ␉␉␉}␊ |
1126 | ␉␉␉␉break;␊ |
1127 | ␊ |
			default:
			{
				// Fallback for unrecognized AMD families: compute the core
				// frequency directly from the P-state encoding,
				// 100 MHz * ((FID & 0x3f) + 0x10) / 2^DID, then express it as
				// a multiple of the bus clock.
				typedef unsigned long long vlong;
				uint64_t prfsts;
				prfsts = rdmsr64(K10_COFVID_STATUS);
				uint64_t r;
				vlong hz;
				r = (prfsts>>6) & 0x07;	// divisor ID
				hz = (((prfsts & 0x3f)+0x10)*100000000ll)/(1<<r);

				currcoef = hz / (200 * Mega);	// NOTE(review): assumes a 200 MHz reference — TODO confirm
			}
		}
1141 | ␊ |
		// Turn the multiplier (currcoef) and half-step flag (currdiv) into
		// bus and core frequencies, via 32.32 fixed-point tick->ns factors.
		if (currcoef)
		{
			if (currdiv)
			{
				// Half multiplier: effective ratio is currcoef + 0.5,
				// kept as (2*currcoef + 1) / 2 in integer arithmetic.
				busFrequency = ((tscFreq * 2) / ((currcoef * 2) + 1));
				busFCvtt2n = ((1 * Giga) << 32) / busFrequency;
				tscFCvtt2n = busFCvtt2n * 2 / (1 + (2 * currcoef));
				cpuFrequency = ((1 * Giga) << 32) / tscFCvtt2n;

				DBG("%d.%d\n", currcoef / currdiv, ((currcoef % currdiv) * 100) / currdiv);
			}
			else
			{
				busFrequency = (tscFreq / currcoef);
				busFCvtt2n = ((1 * Giga) << 32) / busFrequency;
				tscFCvtt2n = busFCvtt2n / currcoef;
				cpuFrequency = ((1 * Giga) << 32) / tscFCvtt2n;
				DBG("%d\n", currcoef);
			}
		}
		else if (!cpuFrequency)
		{
			// No usable multiplier: report the TSC rate as the CPU frequency.
			cpuFrequency = tscFreq;
		}
	}
1167 | ␊ |
#if 0
	if (!busFrequency)
	{
		busFrequency = (DEFAULT_FSB * 1000);
		DBG("\tCPU: busFrequency = 0! using the default value for FSB!\n");
		cpuFrequency = tscFreq;
	}

	DBG("\tcpu freq = 0x%016llxn", timeRDTSC() * 20);	// NOTE(review): "llxn" is missing the backslash of "\n"

#endif

	outb(0x21U, pic0_mask); // restore PIC0 interrupts

	// Publish everything gathered above into the platform record.
	p->CPU.MaxCoef = maxcoef = currcoef;
	p->CPU.MaxDiv = maxdiv = currdiv;
	p->CPU.CurrCoef = currcoef;
	p->CPU.CurrDiv = currdiv;
	p->CPU.TSCFrequency = tscFreq;
	p->CPU.FSBFrequency = busFrequency;
	p->CPU.CPUFrequency = cpuFrequency;

	// keep formatted with spaces instead of tabs

	// Debug dump of the detected CPU properties (only active when DEBUG_CPU).
	DBG("\tCPUID Raw Values:\n");
	for (i = 0; i < CPUID_MAX; i++)
	{
		DBG("\t%02d: %08X-%08X-%08X-%08X\n", i, p->CPU.CPUID[i][eax], p->CPU.CPUID[i][ebx], p->CPU.CPUID[i][ecx], p->CPU.CPUID[i][edx]);
	}
	DBG("\n");
	DBG("\tBrand String: %s\n",		p->CPU.BrandString);		// Processor name (BIOS)
	DBG("\tVendor: 0x%X\n",	p->CPU.Vendor);			// Vendor ex: GenuineIntel
	DBG("\tFamily: 0x%X\n",	p->CPU.Family);			// Family ex: 6 (06h)
	DBG("\tExtFamily: 0x%X\n",	p->CPU.ExtFamily);
	DBG("\tSignature: 0x%08X\n",	p->CPU.Signature);		// CPUID signature
	/*switch (p->CPU.Type) {
		case PT_OEM:
			DBG("\tProcessor type: Intel Original OEM Processor\n");
			break;
		case PT_OD:
			DBG("\tProcessor type: Intel Over Drive Processor\n");
			break;
		case PT_DUAL:
			DBG("\tProcessor type: Intel Dual Processor\n");
			break;
		case PT_RES:
			DBG("\tProcessor type: Intel Reserved\n");
			break;
		default:
			break;
	}*/
	DBG("\tModel: 0x%X\n",	p->CPU.Model);			// Model ex: 37 (025h)
	DBG("\tExtModel: 0x%X\n",	p->CPU.ExtModel);
	DBG("\tStepping: 0x%X\n",	p->CPU.Stepping);		// Stepping ex: 5 (05h)
	DBG("\tMaxCoef: %d\n",		p->CPU.MaxCoef);
	DBG("\tCurrCoef: %d\n",		p->CPU.CurrCoef);
	DBG("\tMaxDiv: %d\n",		p->CPU.MaxDiv);
	DBG("\tCurrDiv: %d\n",		p->CPU.CurrDiv);
	DBG("\tTSCFreq: %dMHz\n",	p->CPU.TSCFrequency / 1000000);
	DBG("\tFSBFreq: %dMHz\n",	(p->CPU.FSBFrequency + 500000) / 1000000);
	DBG("\tCPUFreq: %dMHz\n",	p->CPU.CPUFrequency / 1000000);
	DBG("\tCores: %d\n",		p->CPU.NoCores);		// Cores
	DBG("\tLogical processor: %d\n",		p->CPU.NoThreads);		// Logical procesor
	DBG("\tFeatures: 0x%08x\n",	p->CPU.Features);
//	DBG("\tMicrocode version: %d\n",		p->CPU.MCodeVersion);		// CPU microcode version

	verbose("\n");
#if DEBUG_CPU
	pause();
#endif
}
1239 | |