1 | /*␊ |
2 | * Copyright 2008 Islam Ahmed Zaid. All rights reserved. <azismed@gmail.com>␊ |
3 | * AsereBLN: 2009: cleanup and bugfix␊ |
4 | */␊ |
5 | ␊ |
6 | #include "libsaio.h"␊ |
7 | #include "platform.h"␊ |
8 | #include "cpu.h"␊ |
9 | #include "bootstruct.h"␊ |
10 | #include "boot.h"␊ |
11 | ␊ |
12 | #ifndef DEBUG_CPU␊ |
13 | #define DEBUG_CPU 0␊ |
14 | #endif␊ |
15 | ␊ |
16 | #if DEBUG_CPU␊ |
17 | #define DBG(x...)␉␉printf(x)␊ |
18 | #else␊ |
19 | #define DBG(x...)␉␉msglog(x)␊ |
20 | #endif␊ |
21 | ␊ |
22 | /*␊ |
23 | * timeRDTSC()␊ |
24 | * This routine sets up PIT counter 2 to count down 1/20 of a second.␊ |
25 | * It pauses until the value is latched in the counter␊ |
26 | * and then reads the time stamp counter to return to the caller.␊ |
27 | */␊ |
28 | uint64_t timeRDTSC(void)␊ |
29 | {␊ |
30 | ␉int␉␉attempts = 0;␊ |
31 | ␉uint64_t latchTime;␊ |
32 | ␉uint64_t␉saveTime,intermediate;␊ |
33 | ␉unsigned int timerValue, lastValue;␊ |
34 | ␉//boolean_t␉int_enabled;␊ |
35 | ␉/*␊ |
36 | ␉ * Table of correction factors to account for␊ |
37 | ␉ *␉ - timer counter quantization errors, and␊ |
38 | ␉ *␉ - undercounts 0..5␊ |
39 | ␉ */␊ |
40 | #define SAMPLE_CLKS_EXACT␉(((double) CLKNUM) / 20.0)␊ |
41 | #define SAMPLE_CLKS_INT␉␉((int) CLKNUM / 20)␊ |
42 | #define SAMPLE_NSECS␉␉(2000000000LL)␊ |
43 | #define SAMPLE_MULTIPLIER␉(((double)SAMPLE_NSECS)*SAMPLE_CLKS_EXACT)␊ |
44 | #define ROUND64(x)␉␉((uint64_t)((x) + 0.5))␊ |
45 | ␉uint64_t␉scale[6] = {␊ |
46 | ␉␉ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-0)), ␊ |
47 | ␉␉ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-1)), ␊ |
48 | ␉␉ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-2)), ␊ |
49 | ␉␉ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-3)), ␊ |
50 | ␉␉ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-4)), ␊ |
51 | ␉␉ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-5))␊ |
52 | ␉};␊ |
53 | ␊ |
54 | ␉//int_enabled = ml_set_interrupts_enabled(FALSE);␊ |
55 | ␊ |
56 | restart:␊ |
57 | ␉if (attempts >= 9) // increase to up to 9 attempts.␊ |
58 | ␉{␊ |
59 | ␉ // This will flash-reboot. TODO: Use tscPanic instead.␊ |
60 | ␉␉printf("Timestamp counter calibation failed with %d attempts\n", attempts);␊ |
61 | ␉}␊ |
62 | ␉attempts++;␊ |
63 | ␉enable_PIT2();␉␉// turn on PIT2␊ |
64 | ␉set_PIT2(0);␉␉// reset timer 2 to be zero␊ |
65 | ␉latchTime = rdtsc64();␉// get the time stamp to time ␊ |
66 | ␉latchTime = get_PIT2(&timerValue) - latchTime; // time how long this takes␊ |
67 | ␉set_PIT2(SAMPLE_CLKS_INT);␉// set up the timer for (almost) 1/20th a second␊ |
68 | ␉saveTime = rdtsc64();␉// now time how long a 20th a second is...␊ |
69 | ␉get_PIT2(&lastValue);␊ |
70 | ␉get_PIT2(&lastValue);␉// read twice, first value may be unreliable␊ |
71 | ␉do {␊ |
72 | ␉␉intermediate = get_PIT2(&timerValue);␊ |
73 | ␉␉if (timerValue > lastValue)␊ |
74 | ␉␉{␊ |
75 | ␉␉␉// Timer wrapped␊ |
76 | ␉␉␉set_PIT2(0);␊ |
77 | ␉␉␉disable_PIT2();␊ |
78 | ␉␉␉goto restart;␊ |
79 | ␉␉}␊ |
80 | ␉␉lastValue = timerValue;␊ |
81 | ␉} while (timerValue > 5);␊ |
82 | ␉printf("timerValue␉ %d\n",timerValue);␊ |
83 | ␉printf("intermediate 0x%016llx\n",intermediate);␊ |
84 | ␉printf("saveTime␉ 0x%016llx\n",saveTime);␊ |
85 | ␊ |
86 | ␉intermediate -= saveTime;␉␉// raw count for about 1/20 second␊ |
87 | ␉intermediate *= scale[timerValue];␉// rescale measured time spent␊ |
88 | ␉intermediate /= SAMPLE_NSECS;␉// so its exactly 1/20 a second␊ |
89 | ␉intermediate += latchTime;␉␉// add on our save fudge␊ |
90 | ␊ |
91 | ␉set_PIT2(0);␉␉␉// reset timer 2 to be zero␊ |
92 | ␉disable_PIT2();␉␉␉// turn off PIT 2␊ |
93 | ␉␊ |
94 | ␉//ml_set_interrupts_enabled(int_enabled);␊ |
95 | ␉return intermediate;␊ |
96 | }␊ |
97 | ␊ |
98 | /*␊ |
99 | * DFE: Measures the TSC frequency in Hz (64-bit) using the ACPI PM timer␊ |
100 | */␊ |
101 | static uint64_t measure_tsc_frequency(void)␊ |
102 | {␊ |
103 | ␉uint64_t tscStart;␊ |
104 | ␉uint64_t tscEnd;␊ |
105 | ␉uint64_t tscDelta = 0xffffffffffffffffULL;␊ |
106 | ␉unsigned long pollCount;␊ |
107 | ␉uint64_t retval = 0;␊ |
108 | ␉int i;␊ |
109 | ␊ |
110 | ␉/* Time how many TSC ticks elapse in 30 msec using the 8254 PIT␊ |
111 | ␉ * counter 2. We run this loop 3 times to make sure the cache␊ |
112 | ␉ * is hot and we take the minimum delta from all of the runs.␊ |
113 | ␉ * That is to say that we're biased towards measuring the minimum␊ |
114 | ␉ * number of TSC ticks that occur while waiting for the timer to␊ |
115 | ␉ * expire. That theoretically helps avoid inconsistencies when␊ |
116 | ␉ * running under a VM if the TSC is not virtualized and the host␊ |
117 | ␉ * steals time.␉ The TSC is normally virtualized for VMware.␊ |
118 | ␉ */␊ |
119 | ␉for(i = 0; i < 10; ++i)␊ |
120 | ␉{␊ |
121 | ␉␉enable_PIT2();␊ |
122 | ␉␉set_PIT2_mode0(CALIBRATE_LATCH);␊ |
123 | ␉␉tscStart = rdtsc64();␊ |
124 | ␉␉pollCount = poll_PIT2_gate();␊ |
125 | ␉␉tscEnd = rdtsc64();␊ |
126 | ␉␉/* The poll loop must have run at least a few times for accuracy */␊ |
127 | ␉␉if (pollCount <= 1) {␊ |
128 | ␉␉␉continue;␊ |
129 | ␉␉}␊ |
130 | ␉␉/* The TSC must increment at LEAST once every millisecond.␊ |
131 | ␉␉ * We should have waited exactly 30 msec so the TSC delta should␊ |
132 | ␉␉ * be >= 30. Anything less and the processor is way too slow.␊ |
133 | ␉␉ */␊ |
134 | ␉␉if ((tscEnd - tscStart) <= CALIBRATE_TIME_MSEC) {␊ |
135 | ␉␉␉continue;␊ |
136 | ␉␉}␊ |
137 | ␉␉// tscDelta = MIN(tscDelta, (tscEnd - tscStart))␊ |
138 | ␉␉if ( (tscEnd - tscStart) < tscDelta ) {␊ |
139 | ␉␉␉tscDelta = tscEnd - tscStart;␊ |
140 | ␉␉}␊ |
141 | ␉}␊ |
142 | ␉/* tscDelta is now the least number of TSC ticks the processor made in␊ |
143 | ␉ * a timespan of 0.03 s (e.g. 30 milliseconds)␊ |
144 | ␉ * Linux thus divides by 30 which gives the answer in kiloHertz because␊ |
145 | ␉ * 1 / ms = kHz. But we're xnu and most of the rest of the code uses␊ |
146 | ␉ * Hz so we need to convert our milliseconds to seconds. Since we're␊ |
147 | ␉ * dividing by the milliseconds, we simply multiply by 1000.␊ |
148 | ␉ */␊ |
149 | ␊ |
150 | ␉/* Unlike linux, we're not limited to 32-bit, but we do need to take care␊ |
151 | ␉ * that we're going to multiply by 1000 first so we do need at least some␊ |
152 | ␉ * arithmetic headroom. For now, 32-bit should be enough.␊ |
153 | ␉ * Also unlike Linux, our compiler can do 64-bit integer arithmetic.␊ |
154 | ␉ */␊ |
155 | ␉if (tscDelta > (1ULL<<32)) {␊ |
156 | ␉␉retval = 0;␊ |
157 | ␉} else {␊ |
158 | ␉␉retval = tscDelta * 1000 / 30;␊ |
159 | ␉}␊ |
160 | ␉disable_PIT2();␊ |
161 | ␉return retval;␊ |
162 | }␊ |
163 | ␊ |
164 | /*␊ |
165 | * Original comment/code:␊ |
166 | * "DFE: Measures the Max Performance Frequency in Hz (64-bit)"␊ |
167 | *␊ |
168 | * Measures the Actual Performance Frequency in Hz (64-bit)␊ |
169 | * (just a naming change, mperf --> aperf )␊ |
170 | */␊ |
171 | static uint64_t measure_aperf_frequency(void)␊ |
172 | {␊ |
173 | ␉uint64_t aperfStart;␊ |
174 | ␉uint64_t aperfEnd;␊ |
175 | ␉uint64_t aperfDelta = 0xffffffffffffffffULL;␊ |
176 | ␉unsigned long pollCount;␊ |
177 | ␉uint64_t retval = 0;␊ |
178 | ␉int i;␊ |
179 | ␊ |
180 | ␉/* Time how many APERF ticks elapse in 30 msec using the 8254 PIT␊ |
181 | ␉ * counter 2. We run this loop 3 times to make sure the cache␊ |
182 | ␉ * is hot and we take the minimum delta from all of the runs.␊ |
183 | ␉ * That is to say that we're biased towards measuring the minimum␊ |
184 | ␉ * number of APERF ticks that occur while waiting for the timer to␊ |
185 | ␉ * expire.␊ |
186 | ␉ */␊ |
187 | ␉for(i = 0; i < 10; ++i)␊ |
188 | ␉{␊ |
189 | ␉␉enable_PIT2();␊ |
190 | ␉␉set_PIT2_mode0(CALIBRATE_LATCH);␊ |
191 | ␉␉aperfStart = rdmsr64(MSR_AMD_APERF);␊ |
192 | ␉␉pollCount = poll_PIT2_gate();␊ |
193 | ␉␉aperfEnd = rdmsr64(MSR_AMD_APERF);␊ |
194 | ␉␉/* The poll loop must have run at least a few times for accuracy */␊ |
195 | ␉␉if (pollCount <= 1) {␊ |
196 | ␉␉␉continue;␊ |
197 | ␉␉}␊ |
198 | ␉␉/* The TSC must increment at LEAST once every millisecond.␊ |
199 | ␉␉ * We should have waited exactly 30 msec so the APERF delta should␊ |
200 | ␉␉ * be >= 30. Anything less and the processor is way too slow.␊ |
201 | ␉␉ */␊ |
202 | ␉␉if ((aperfEnd - aperfStart) <= CALIBRATE_TIME_MSEC) {␊ |
203 | ␉␉␉continue;␊ |
204 | ␉␉}␊ |
205 | ␉␉// tscDelta = MIN(tscDelta, (tscEnd - tscStart))␊ |
206 | ␉␉if ( (aperfEnd - aperfStart) < aperfDelta ) {␊ |
207 | ␉␉␉aperfDelta = aperfEnd - aperfStart;␊ |
208 | ␉␉}␊ |
209 | ␉}␊ |
210 | ␉/* mperfDelta is now the least number of MPERF ticks the processor made in␊ |
211 | ␉ * a timespan of 0.03 s (e.g. 30 milliseconds)␊ |
212 | ␉ */␊ |
213 | ␊ |
214 | ␉if (aperfDelta > (1ULL<<32)) {␊ |
215 | ␉␉retval = 0;␊ |
216 | ␉} else {␊ |
217 | ␉␉retval = aperfDelta * 1000 / 30;␊ |
218 | ␉}␊ |
219 | ␉disable_PIT2();␊ |
220 | ␉return retval;␊ |
221 | }␊ |
222 | ␊ |
223 | /*␊ |
224 | * Calculates the FSB and CPU frequencies using specific MSRs for each CPU␊ |
225 | * - multi. is read from a specific MSR. In the case of Intel, there is:␊ |
226 | *␉ a max multi. (used to calculate the FSB freq.),␊ |
227 | *␉ and a current multi. (used to calculate the CPU freq.)␊ |
228 | * - fsbFrequency = tscFrequency / multi␊ |
229 | * - cpuFrequency = fsbFrequency * multi␊ |
230 | */␊ |
void scan_cpu(PlatformInfo_t *p)
{
	uint64_t	tscFrequency, fsbFrequency, cpuFrequency;
	uint64_t	msr, flex_ratio;
	uint8_t		maxcoef, maxdiv, currcoef, bus_ratio_max, currdiv;
	const char	*newratio;
	int			len, myfsb;
	uint8_t		bus_ratio_min;
	uint32_t	max_ratio, min_ratio;

	max_ratio = min_ratio = myfsb = bus_ratio_min = 0;
	maxcoef = maxdiv = bus_ratio_max = currcoef = currdiv = 0;

	/* get cpuid values */
	// Leaf 0: vendor string + max supported leaf; leaf 1: signature/features.
	do_cpuid(0x00000000, p->CPU.CPUID[CPUID_0]);
	do_cpuid(0x00000001, p->CPU.CPUID[CPUID_1]);

	do_cpuid(0x00000002, p->CPU.CPUID[CPUID_2]);
	do_cpuid(0x00000003, p->CPU.CPUID[CPUID_3]);
	do_cpuid2(0x00000004, 0, p->CPU.CPUID[CPUID_4]);

	// Extended leaves: only query those the CPU reports as supported.
	do_cpuid(0x80000000, p->CPU.CPUID[CPUID_80]);
	if (p->CPU.CPUID[CPUID_0][0] >= 0x5) {
		do_cpuid(5, p->CPU.CPUID[CPUID_5]);
	}
	if (p->CPU.CPUID[CPUID_0][0] >= 6) {
		do_cpuid(6, p->CPU.CPUID[CPUID_6]);
	}
	if ((p->CPU.CPUID[CPUID_80][0] & 0x0000000f) >= 8) {
		do_cpuid(0x80000008, p->CPU.CPUID[CPUID_88]);
		do_cpuid(0x80000001, p->CPU.CPUID[CPUID_81]);
	} else if ((p->CPU.CPUID[CPUID_80][0] & 0x0000000f) >= 1) {
		do_cpuid(0x80000001, p->CPU.CPUID[CPUID_81]);
	}

// #if DEBUG_CPU
	{
		int		i;
		DBG("CPUID Raw Values:\n");
		for (i = 0; i < CPUID_MAX; i++) {
			DBG("%02d: %08x-%08x-%08x-%08x\n", i,
				 p->CPU.CPUID[i][0], p->CPU.CPUID[i][1],
				 p->CPU.CPUID[i][2], p->CPU.CPUID[i][3]);
		}
	}
// #endif

/*
EAX (Intel):
31       28 27            20 19    16 1514 1312 11     8 7      4 3      0
+--------+----------------+--------+----+----+--------+--------+--------+
|########|Extended family |Extmodel|####|type|familyid| model  |stepping|
+--------+----------------+--------+----+----+--------+--------+--------+

EAX (AMD):
31       28 27            20 19    16 1514 1312 11     8 7      4 3      0
+--------+----------------+--------+----+----+--------+--------+--------+
|########|Extended family |Extmodel|####|####|familyid| model  |stepping|
+--------+----------------+--------+----+----+--------+--------+--------+
*/

	// Decode the signature fields from CPUID leaf 1 EAX (diagram above).
	p->CPU.Vendor		= p->CPU.CPUID[CPUID_0][1];
	p->CPU.Signature	= p->CPU.CPUID[CPUID_1][0];
	// stepping = cpu_feat_eax & 0xF;
	p->CPU.Stepping		= bitfield(p->CPU.CPUID[CPUID_1][0], 3, 0);
	// model = (cpu_feat_eax >> 4) & 0xF;
	p->CPU.Model		= bitfield(p->CPU.CPUID[CPUID_1][0], 7, 4);
	// family = (cpu_feat_eax >> 8) & 0xF;
	p->CPU.Family		= bitfield(p->CPU.CPUID[CPUID_1][0], 11, 8);
	// type = (cpu_feat_eax >> 12) & 0x3;
	//p->CPU.Type		= bitfield(p->CPU.CPUID[CPUID_1][0], 13, 12);
	// ext_model = (cpu_feat_eax >> 16) & 0xF;
	p->CPU.ExtModel		= bitfield(p->CPU.CPUID[CPUID_1][0], 19, 16);
	// ext_family = (cpu_feat_eax >> 20) & 0xFF;
	p->CPU.ExtFamily	= bitfield(p->CPU.CPUID[CPUID_1][0], 27, 20);

	// Fold the extended model into the displayed model number.
	p->CPU.Model += (p->CPU.ExtModel << 4);

	/* Core/thread counts: Nehalem and newer Intel CPUs expose the real
	 * counts in MSR 0x35; AMD and older Intel parts fall back to CPUID. */
	if (p->CPU.Vendor == CPUID_VENDOR_INTEL &&
		p->CPU.Family == 0x06 &&
		p->CPU.Model >= CPU_MODEL_NEHALEM &&
		p->CPU.Model != CPU_MODEL_ATOM		// MSR is *NOT* available on the Intel Atom CPU
		) {
		msr = rdmsr64(MSR_CORE_THREAD_COUNT);					// Undocumented MSR in Nehalem and newer CPUs
		p->CPU.NoCores		= bitfield((uint32_t)msr, 31, 16);	// Using undocumented MSR to get actual values
		p->CPU.NoThreads	= bitfield((uint32_t)msr, 15, 0);	// Using undocumented MSR to get actual values
	} else if (p->CPU.Vendor == CPUID_VENDOR_AMD) {
		p->CPU.NoThreads	= bitfield(p->CPU.CPUID[CPUID_1][1], 23, 16);
		p->CPU.NoCores		= bitfield(p->CPU.CPUID[CPUID_88][2], 7, 0) + 1;
	} else {
		// Use previous method for Cores and Threads
		p->CPU.NoThreads	= bitfield(p->CPU.CPUID[CPUID_1][1], 23, 16);
		p->CPU.NoCores		= bitfield(p->CPU.CPUID[CPUID_4][0], 31, 26) + 1;
	}

	/* get BrandString (if supported) */
	/* Copyright: from Apple's XNU cpuid.c */
	if (p->CPU.CPUID[CPUID_80][0] > 0x80000004) {
		uint32_t	reg[4];
		char		str[128], *s;
		/*
		 * The BrandString 48 bytes (max), guaranteed to
		 * be NULL terminated.
		 */
		do_cpuid(0x80000002, reg);
		bcopy((char *)reg, &str[0], 16);
		do_cpuid(0x80000003, reg);
		bcopy((char *)reg, &str[16], 16);
		do_cpuid(0x80000004, reg);
		bcopy((char *)reg, &str[32], 16);
		// Skip any leading space padding in the brand string.
		for (s = str; *s != '\0'; s++) {
			if (*s != ' ') {
				break;
			}
		}
		
		strlcpy(p->CPU.BrandString, s, sizeof(p->CPU.BrandString));
		
		if (!strncmp(p->CPU.BrandString, CPU_STRING_UNKNOWN, MIN(sizeof(p->CPU.BrandString), strlen(CPU_STRING_UNKNOWN) + 1))) {
			/*
			 * This string means we have a firmware-programmable brand string,
			 * and the firmware couldn't figure out what sort of CPU we have.
			 */
			p->CPU.BrandString[0] = '\0';
		}
	}

	/* setup features */
	// CPUID leaf 1: EDX ([3]) holds legacy feature bits, ECX ([2]) the newer ones.
	if ((bit(23) & p->CPU.CPUID[CPUID_1][3]) != 0) {
		p->CPU.Features |= CPU_FEATURE_MMX;
	}
	if ((bit(25) & p->CPU.CPUID[CPUID_1][3]) != 0) {
		p->CPU.Features |= CPU_FEATURE_SSE;
	}
	if ((bit(26) & p->CPU.CPUID[CPUID_1][3]) != 0) {
		p->CPU.Features |= CPU_FEATURE_SSE2;
	}
	if ((bit(0) & p->CPU.CPUID[CPUID_1][2]) != 0) {
		p->CPU.Features |= CPU_FEATURE_SSE3;
	}
	if ((bit(19) & p->CPU.CPUID[CPUID_1][2]) != 0) {
		p->CPU.Features |= CPU_FEATURE_SSE41;
	}
	if ((bit(20) & p->CPU.CPUID[CPUID_1][2]) != 0) {
		p->CPU.Features |= CPU_FEATURE_SSE42;
	}
	// 64-bit capability lives in extended leaf 0x80000001 EDX bit 29.
	if ((bit(29) & p->CPU.CPUID[CPUID_81][3]) != 0) {
		p->CPU.Features |= CPU_FEATURE_EM64T;
	}
	if ((bit(5) & p->CPU.CPUID[CPUID_1][3]) != 0) {
		p->CPU.Features |= CPU_FEATURE_MSR;
	}
	//if ((bit(28) & p->CPU.CPUID[CPUID_1][3]) != 0) {
	// HTT inferred from thread count exceeding core count rather than the
	// raw CPUID HTT bit (which is also set on multi-core non-HT parts).
	if (p->CPU.NoThreads > p->CPU.NoCores) {
		p->CPU.Features |= CPU_FEATURE_HTT;
	}

	tscFrequency = measure_tsc_frequency();
	/* if usual method failed */
	if ( tscFrequency < 1000 ) { //TEST
		// timeRDTSC() measures 1/20 s worth of TSC ticks; scale to 1 s.
		tscFrequency = timeRDTSC() * 20;
	}
	fsbFrequency = 0;
	cpuFrequency = 0;

	/* Intel family 6 / 15: derive FSB and CPU frequency from ratio MSRs. */
	if ((p->CPU.Vendor == CPUID_VENDOR_INTEL) && ((p->CPU.Family == 0x06) || (p->CPU.Family == 0x0f))) {
		int intelCPU = p->CPU.Model;
		if ((p->CPU.Family == 0x06 && p->CPU.Model >= 0x0c) || (p->CPU.Family == 0x0f && p->CPU.Model >= 0x03))	{
			/* Nehalem CPU model */
			if (p->CPU.Family == 0x06 && (p->CPU.Model == CPU_MODEL_NEHALEM		||
										  p->CPU.Model == CPU_MODEL_FIELDS	||
										  p->CPU.Model == CPU_MODEL_DALES	||
										  p->CPU.Model == CPU_MODEL_DALES_32NM	||
										  p->CPU.Model == CPU_MODEL_WESTMERE	||
										  p->CPU.Model == CPU_MODEL_NEHALEM_EX	||
										  p->CPU.Model == CPU_MODEL_WESTMERE_EX ||
										  p->CPU.Model == CPU_MODEL_SANDYBRIDGE ||
										  p->CPU.Model == CPU_MODEL_JAKETOWN ||
										  p->CPU.Model == CPU_MODEL_IVYBRIDGE_XEON	||
										  p->CPU.Model == CPU_MODEL_IVYBRIDGE ||
										  p->CPU.Model == CPU_MODEL_HASWELL ||
										  p->CPU.Model == CPU_MODEL_HASWELL_SVR ||
										  //p->CPU.Model == CPU_MODEL_HASWELL_H ||
										  p->CPU.Model == CPU_MODEL_HASWELL_ULT ||
										  p->CPU.Model == CPU_MODEL_CRYSTALWELL ))
			{
				msr = rdmsr64(MSR_PLATFORM_INFO);
				DBG("msr(%d): platform_info %08x\n", __LINE__, bitfield(msr, 31, 0));
				bus_ratio_max = bitfield(msr, 15, 8);
				bus_ratio_min = bitfield(msr, 47, 40); //valv: not sure about this one (Remarq.1)
				msr = rdmsr64(MSR_FLEX_RATIO);
				DBG("msr(%d): flex_ratio %08x\n", __LINE__, bitfield(msr, 31, 0));
				// Bit 16 = flex-ratio enable/presence bit.
				if (bitfield(msr, 16, 16)) {
					flex_ratio = bitfield(msr, 15, 8);
					/* bcc9: at least on the gigabyte h67ma-ud2h,
					 where the cpu multipler can't be changed to
					 allow overclocking, the flex_ratio msr has unexpected (to OSX)
					 contents.	These contents cause mach_kernel to
					 fail to compute the bus ratio correctly, instead
					 causing the system to crash since tscGranularity
					 is inadvertently set to 0.
					 */
					if (flex_ratio == 0) {
						/* Clear bit 16 (evidently the presence bit) */
						wrmsr64(MSR_FLEX_RATIO, (msr & 0xFFFFFFFFFFFEFFFFULL));
						msr = rdmsr64(MSR_FLEX_RATIO);
						verbose("Unusable flex ratio detected. Patched MSR now %08x\n", bitfield(msr, 31, 0));
					} else {
						if (bus_ratio_max > flex_ratio) {
							bus_ratio_max = flex_ratio;
						}
					}
				}

				if (bus_ratio_max) {
					fsbFrequency = (tscFrequency / bus_ratio_max);
				}
				//valv: Turbo Ratio Limit
				if ((intelCPU != 0x2e) && (intelCPU != 0x2f)) {
					msr = rdmsr64(MSR_TURBO_RATIO_LIMIT);
					cpuFrequency = bus_ratio_max * fsbFrequency;
					max_ratio = bus_ratio_max * 10;
				} else {
					cpuFrequency = tscFrequency;
				}
				/* User override: "busratio" boot key, e.g. "33" or "335"
				 * (3+ chars means a trailing half step, i.e. x.5). */
				if ((getValueForKey(kbusratio, &newratio, &len, &bootInfo->chameleonConfig)) && (len <= 4)) {
					max_ratio = atoi(newratio);
					max_ratio = (max_ratio * 10);
					if (len >= 3) {
						max_ratio = (max_ratio + 5);
					}

					verbose("Bus-Ratio: min=%d, max=%s\n", bus_ratio_min, newratio);

					// extreme overclockers may love 320 ;)
					if ((max_ratio >= min_ratio) && (max_ratio <= 320)) {
						cpuFrequency = (fsbFrequency * max_ratio) / 10;
						if (len >= 3) {
							maxdiv = 1;
						} else {
							maxdiv = 0;
						}
					} else {
						max_ratio = (bus_ratio_max * 10);
					}
				}
				//valv: to be uncommented if Remarq.1 didn't stick
				/*if (bus_ratio_max > 0) bus_ratio = flex_ratio;*/
				p->CPU.MaxRatio = max_ratio;
				p->CPU.MinRatio = min_ratio;

				myfsb = fsbFrequency / 1000000;
				verbose("Sticking with [BCLK: %dMhz, Bus-Ratio: %d]\n", myfsb, max_ratio/10); // Bungo: fixed wrong Bus-Ratio readout
				currcoef = bus_ratio_max;
			} else {
				/* Pre-Nehalem path: ratios come from IA32_PERF_STATUS. */
				msr = rdmsr64(MSR_IA32_PERF_STATUS);
				DBG("msr(%d): ia32_perf_stat 0x%08x\n", __LINE__, bitfield(msr, 31, 0));
				currcoef = bitfield(msr, 12, 8); // Bungo: reverted to 2263 state because of wrong old CPUs freq. calculating
				/* Non-integer bus ratio for the max-multi*/
				maxdiv = bitfield(msr, 46, 46);
				/* Non-integer bus ratio for the current-multi (undocumented)*/
				currdiv = bitfield(msr, 14, 14);

				// This will always be model >= 3
				if ((p->CPU.Family == 0x06 && p->CPU.Model >= 0x0e) || (p->CPU.Family == 0x0f)) {
					/* On these models, maxcoef defines TSC freq */
					maxcoef = bitfield(msr, 44, 40);
				} else {
					/* On lower models, currcoef defines TSC freq */
					/* XXX */
					maxcoef = currcoef;
				}

				if (maxcoef) {
					// A set "div" bit means a half-step multiplier (x.5).
					if (maxdiv) {
						fsbFrequency = ((tscFrequency * 2) / ((maxcoef * 2) + 1));
					} else {
						fsbFrequency = (tscFrequency / maxcoef);
					}
					if (currdiv) {
						cpuFrequency = (fsbFrequency * ((currcoef * 2) + 1) / 2);
					} else {
						cpuFrequency = (fsbFrequency * currcoef);
					}
					DBG("max: %d%s current: %d%s\n", maxcoef, maxdiv ? ".5" : "",currcoef, currdiv ? ".5" : "");
				}
			}
		}
		/* Mobile CPU */
		if (rdmsr64(MSR_IA32_PLATFORM_ID) & (1<<28)) {
			p->CPU.Features |= CPU_FEATURE_MOBILE;
		}
	} else if ((p->CPU.Vendor == CPUID_VENDOR_AMD) && (p->CPU.Family == 0x0f)) {
		/* AMD: coefficients come from FID/VID (K8) or COFVID (K10+) MSRs. */
		switch(p->CPU.ExtFamily) {
			case 0x00: /* K8 */
				msr = rdmsr64(K8_FIDVID_STATUS);
				maxcoef = bitfield(msr, 21, 16) / 2 + 4;
				currcoef = bitfield(msr, 5, 0) / 2 + 4;
				break;

			case 0x01: /* K10 */
				msr = rdmsr64(K10_COFVID_STATUS);
				do_cpuid2(0x00000006, 0, p->CPU.CPUID[CPUID_6]);
				// EffFreq: effective frequency interface
				if (bitfield(p->CPU.CPUID[CPUID_6][2], 0, 0) == 1) {
					//uint64_t mperf = measure_mperf_frequency();
					uint64_t aperf = measure_aperf_frequency();
					cpuFrequency = aperf;
				}
				// NOTE: tsc runs at the maccoeff (non turbo)
				//			*not* at the turbo frequency.
				maxcoef	 = bitfield(msr, 54, 49) / 2 + 4;
				currcoef = bitfield(msr, 5, 0) + 0x10;
				currdiv = 2 << bitfield(msr, 8, 6);

				break;

			case 0x05: /* K14 */
				msr = rdmsr64(K10_COFVID_STATUS);
				currcoef = (bitfield(msr, 54, 49) + 0x10) << 2;
				currdiv = (bitfield(msr, 8, 4) + 1) << 2;
				currdiv += bitfield(msr, 3, 0);

				break;

			case 0x02: /* K11 */
				// not implimented
				break;
		}

		// Derive FSB from whichever coefficient pair was populated above.
		if (maxcoef) {
			if (currdiv) {
				if (!currcoef) {
					currcoef = maxcoef;
				}

				if (!cpuFrequency) {
					fsbFrequency = ((tscFrequency * currdiv) / currcoef);
				} else {
					fsbFrequency = ((cpuFrequency * currdiv) / currcoef);
				}
				DBG("%d.%d\n", currcoef / currdiv, ((currcoef % currdiv) * 100) / currdiv);
			} else {
				if (!cpuFrequency) {
					fsbFrequency = (tscFrequency / maxcoef);
				} else {
					fsbFrequency = (cpuFrequency / maxcoef);
				}
				DBG("%d\n", currcoef);
			}
		} else if (currcoef) {
			if (currdiv) {
				fsbFrequency = ((tscFrequency * currdiv) / currcoef);
				DBG("%d.%d\n", currcoef / currdiv, ((currcoef % currdiv) * 100) / currdiv);
			} else {
				fsbFrequency = (tscFrequency / currcoef);
				DBG("%d\n", currcoef);
			}
		}
		if (!cpuFrequency) cpuFrequency = tscFrequency;
	}
	
#if 0
	if (!fsbFrequency) {
		fsbFrequency = (DEFAULT_FSB * 1000);
		cpuFrequency = tscFrequency;
		DBG("0 ! using the default value for FSB !\n");
	}
#endif

	// Publish everything we computed into the platform info structure.
	p->CPU.MaxCoef = maxcoef;
	p->CPU.MaxDiv = maxdiv;
	p->CPU.CurrCoef = currcoef;
	p->CPU.CurrDiv = currdiv;
	p->CPU.TSCFrequency = tscFrequency;
	p->CPU.FSBFrequency = fsbFrequency;
	p->CPU.CPUFrequency = cpuFrequency;

	// keep formatted with spaces instead of tabs
	DBG("CPU: Brand String:             %s\n", p->CPU.BrandString);
	DBG("CPU: Vendor/Family/ExtFamily:  0x%x/0x%x/0x%x\n", p->CPU.Vendor, p->CPU.Family, p->CPU.ExtFamily);
	DBG("CPU: Model/ExtModel/Stepping:  0x%x/0x%x/0x%x\n", p->CPU.Model, p->CPU.ExtModel, p->CPU.Stepping);
	DBG("CPU: MaxCoef/CurrCoef:         0x%x/0x%x\n", p->CPU.MaxCoef, p->CPU.CurrCoef);
	DBG("CPU: MaxDiv/CurrDiv:           0x%x/0x%x\n", p->CPU.MaxDiv, p->CPU.CurrDiv);
	DBG("CPU: TSCFreq:                  %dMHz\n", p->CPU.TSCFrequency / 1000000);
	DBG("CPU: FSBFreq:                  %dMHz\n", p->CPU.FSBFrequency / 1000000);
	DBG("CPU: CPUFreq:                  %dMHz\n", p->CPU.CPUFrequency / 1000000);
	DBG("CPU: NoCores/NoThreads:        %d/%d\n", p->CPU.NoCores, p->CPU.NoThreads);
	DBG("CPU: Features:                 0x%08x\n", p->CPU.Features);
#if DEBUG_CPU
	pause();
#endif
}
624 | |