1 | /*␊ |
2 | * Copyright 2008 Islam Ahmed Zaid. All rights reserved. <azismed@gmail.com>␊ |
3 | * AsereBLN: 2009: cleanup and bugfix␊ |
4 | */␊ |
5 | ␊ |
6 | #include "libsaio.h"␊ |
7 | #include "platform.h"␊ |
8 | #include "cpu.h"␊ |
9 | #include "bootstruct.h"␊ |
10 | #include "boot.h"␊ |
11 | ␊ |
12 | #ifndef DEBUG_CPU␊ |
13 | #define DEBUG_CPU 0␊ |
14 | #endif␊ |
15 | ␊ |
16 | #if DEBUG_CPU␊ |
17 | #define DBG(x...)␉␉printf(x)␊ |
18 | #else␊ |
19 | #define DBG(x...)␉␉msglog(x)␊ |
20 | #endif␊ |
21 | ␊ |
22 | /*␊ |
23 | * DFE: Measures the TSC frequency in Hz (64-bit) using the ACPI PM timer␊ |
24 | */␊ |
25 | static uint64_t measure_tsc_frequency(void)␊ |
26 | {␊ |
27 | uint64_t tscStart;␊ |
28 | uint64_t tscEnd;␊ |
29 | uint64_t tscDelta = 0xffffffffffffffffULL;␊ |
30 | unsigned long pollCount;␊ |
31 | uint64_t retval = 0;␊ |
32 | int i;␊ |
33 | ␊ |
34 | /* Time how many TSC ticks elapse in 30 msec using the 8254 PIT␊ |
35 | * counter 2. We run this loop 3 times to make sure the cache␊ |
36 | * is hot and we take the minimum delta from all of the runs.␊ |
37 | * That is to say that we're biased towards measuring the minimum␊ |
38 | * number of TSC ticks that occur while waiting for the timer to␊ |
39 | * expire. That theoretically helps avoid inconsistencies when␊ |
40 | * running under a VM if the TSC is not virtualized and the host␊ |
41 | * steals time. The TSC is normally virtualized for VMware.␊ |
42 | */␊ |
43 | for(i = 0; i < 10; ++i)␊ |
44 | {␊ |
45 | enable_PIT2();␊ |
46 | set_PIT2_mode0(CALIBRATE_LATCH);␊ |
47 | tscStart = rdtsc64();␊ |
48 | pollCount = poll_PIT2_gate();␊ |
49 | tscEnd = rdtsc64();␊ |
50 | /* The poll loop must have run at least a few times for accuracy */␊ |
51 | if(pollCount <= 1)␊ |
52 | continue;␊ |
53 | /* The TSC must increment at LEAST once every millisecond. We␊ |
54 | * should have waited exactly 30 msec so the TSC delta should␊ |
55 | * be >= 30. Anything less and the processor is way too slow.␊ |
56 | */␊ |
57 | if((tscEnd - tscStart) <= CALIBRATE_TIME_MSEC)␊ |
58 | continue;␊ |
59 | // tscDelta = MIN(tscDelta, (tscEnd - tscStart))␊ |
60 | if( (tscEnd - tscStart) < tscDelta )␊ |
61 | tscDelta = tscEnd - tscStart;␊ |
62 | }␊ |
63 | /* tscDelta is now the least number of TSC ticks the processor made in␊ |
64 | * a timespan of 0.03 s (e.g. 30 milliseconds)␊ |
65 | * Linux thus divides by 30 which gives the answer in kiloHertz because␊ |
66 | * 1 / ms = kHz. But we're xnu and most of the rest of the code uses␊ |
67 | * Hz so we need to convert our milliseconds to seconds. Since we're␊ |
68 | * dividing by the milliseconds, we simply multiply by 1000.␊ |
69 | */␊ |
70 | ␊ |
71 | /* Unlike linux, we're not limited to 32-bit, but we do need to take care␊ |
72 | * that we're going to multiply by 1000 first so we do need at least some␊ |
73 | * arithmetic headroom. For now, 32-bit should be enough.␊ |
74 | * Also unlike Linux, our compiler can do 64-bit integer arithmetic.␊ |
75 | */␊ |
76 | if(tscDelta > (1ULL<<32))␊ |
77 | retval = 0;␊ |
78 | else␊ |
79 | {␊ |
80 | retval = tscDelta * 1000 / 30;␊ |
81 | }␊ |
82 | disable_PIT2();␊ |
83 | return retval;␊ |
84 | }␊ |
85 | ␊ |
86 | #if 0␊ |
87 | /*␊ |
88 | * DFE: Measures the Max Performance Frequency in Hz (64-bit)␊ |
89 | */␊ |
/* NOTE(review): this function is compiled out by the surrounding #if 0
 * and kept only for reference; it mirrors measure_aperf_frequency() but
 * reads MSR_AMD_MPERF instead. */
static uint64_t measure_mperf_frequency(void)
{
    uint64_t mperfStart;
    uint64_t mperfEnd;
    uint64_t mperfDelta = 0xffffffffffffffffULL;   /* sentinel: no valid run yet */
    unsigned long pollCount;
    uint64_t retval = 0;
    int i;

    /* Time how many MPERF ticks elapse in 30 msec using the 8254 PIT
     * counter 2. We run this loop 10 times to make sure the cache
     * is hot and we take the minimum delta from all of the runs.
     * That is to say that we're biased towards measuring the minimum
     * number of MPERF ticks that occur while waiting for the timer to
     * expire.
     */
    for(i = 0; i < 10; ++i)
    {
        enable_PIT2();
        set_PIT2_mode0(CALIBRATE_LATCH);
        mperfStart = rdmsr64(MSR_AMD_MPERF);
        pollCount = poll_PIT2_gate();
        mperfEnd = rdmsr64(MSR_AMD_MPERF);
        /* The poll loop must have run at least a few times for accuracy */
        if(pollCount <= 1)
            continue;
        /* The MPERF must increment at LEAST once every millisecond. We
         * should have waited exactly 30 msec so the MPERF delta should
         * be >= 30. Anything less and the processor is way too slow.
         */
        if((mperfEnd - mperfStart) <= CALIBRATE_TIME_MSEC)
            continue;
        // mperfDelta = MIN(mperfDelta, (mperfEnd - mperfStart))
        if( (mperfEnd - mperfStart) < mperfDelta )
            mperfDelta = mperfEnd - mperfStart;
    }
    /* mperfDelta is now the least number of MPERF ticks the processor made in
     * a timespan of 0.03 s (e.g. 30 milliseconds).
     * Multiply by 1000 and divide by 30 to convert ticks/ms into Hz.
     */

    if(mperfDelta > (1ULL<<32))
        retval = 0;   /* delta too large (or all runs discarded) — give up */
    else
    {
        retval = mperfDelta * 1000 / 30;
    }
    disable_PIT2();
    return retval;
}
139 | #endif␊ |
140 | /*␊ |
141 | * Measures the Actual Performance Frequency in Hz (64-bit)␊ |
142 | */␊ |
143 | static uint64_t measure_aperf_frequency(void)␊ |
144 | {␊ |
145 | uint64_t aperfStart;␊ |
146 | uint64_t aperfEnd;␊ |
147 | uint64_t aperfDelta = 0xffffffffffffffffULL;␊ |
148 | unsigned long pollCount;␊ |
149 | uint64_t retval = 0;␊ |
150 | int i;␊ |
151 | ␊ |
152 | /* Time how many APERF ticks elapse in 30 msec using the 8254 PIT␊ |
153 | * counter 2. We run this loop 3 times to make sure the cache␊ |
154 | * is hot and we take the minimum delta from all of the runs.␊ |
155 | * That is to say that we're biased towards measuring the minimum␊ |
156 | * number of APERF ticks that occur while waiting for the timer to␊ |
157 | * expire. ␊ |
158 | */␊ |
159 | for(i = 0; i < 10; ++i)␊ |
160 | {␊ |
161 | enable_PIT2();␊ |
162 | set_PIT2_mode0(CALIBRATE_LATCH);␊ |
163 | aperfStart = rdmsr64(MSR_AMD_APERF);␊ |
164 | pollCount = poll_PIT2_gate();␊ |
165 | aperfEnd = rdmsr64(MSR_AMD_APERF);␊ |
166 | /* The poll loop must have run at least a few times for accuracy */␊ |
167 | if(pollCount <= 1)␊ |
168 | continue;␊ |
169 | /* The TSC must increment at LEAST once every millisecond. We␊ |
170 | * should have waited exactly 30 msec so the APERF delta should␊ |
171 | * be >= 30. Anything less and the processor is way too slow.␊ |
172 | */␊ |
173 | if((aperfEnd - aperfStart) <= CALIBRATE_TIME_MSEC)␊ |
174 | continue;␊ |
175 | // tscDelta = MIN(tscDelta, (tscEnd - tscStart))␊ |
176 | if( (aperfEnd - aperfStart) < aperfDelta )␊ |
177 | aperfDelta = aperfEnd - aperfStart;␊ |
178 | }␊ |
179 | /* mperfDelta is now the least number of MPERF ticks the processor made in␊ |
180 | * a timespan of 0.03 s (e.g. 30 milliseconds)␊ |
181 | */␊ |
182 | ␊ |
183 | if(aperfDelta > (1ULL<<32))␊ |
184 | retval = 0;␊ |
185 | else␊ |
186 | {␊ |
187 | retval = aperfDelta * 1000 / 30;␊ |
188 | }␊ |
189 | disable_PIT2();␊ |
190 | return retval;␊ |
191 | }␊ |
192 | ␊ |
193 | ␊ |
194 | /*␊ |
195 | * Calculates the FSB and CPU frequencies using specific MSRs for each CPU␊ |
196 | * - multi. is read from a specific MSR. In the case of Intel, there is:␊ |
197 | * a max multi. (used to calculate the FSB freq.),␊ |
198 | * and a current multi. (used to calculate the CPU freq.)␊ |
199 | * - fsbFrequency = tscFrequency / multi␊ |
200 | * - cpuFrequency = fsbFrequency * multi␊ |
201 | */␊ |
202 | ␊ |
void scan_cpu(PlatformInfo_t *p)
{
	uint64_t	tscFrequency, fsbFrequency, cpuFrequency;
	uint64_t	msr, flex_ratio;
	uint8_t		maxcoef, maxdiv, currcoef, bus_ratio_max, currdiv;
	const char *newratio;
	int len, myfsb;
	uint8_t bus_ratio_min;
	uint32_t max_ratio, min_ratio;

	max_ratio = min_ratio = myfsb = bus_ratio_min = 0;
	maxcoef = maxdiv = bus_ratio_max = currcoef = currdiv = 0;

	/* get cpuid values */
	do_cpuid(0x00000000, p->CPU.CPUID[CPUID_0]);
	do_cpuid(0x00000001, p->CPU.CPUID[CPUID_1]);
	do_cpuid(0x00000002, p->CPU.CPUID[CPUID_2]);
	do_cpuid(0x00000003, p->CPU.CPUID[CPUID_3]);
    do_cpuid2(0x00000004, 0, p->CPU.CPUID[CPUID_4]);
	do_cpuid(0x80000000, p->CPU.CPUID[CPUID_80]);
	/* Leaf 0x80000000 EAX reports the highest supported extended leaf;
	 * only query 0x80000008/0x80000001 when the CPU implements them. */
    if ((p->CPU.CPUID[CPUID_80][0] & 0x0000000f) >= 8) {
        do_cpuid(0x80000008, p->CPU.CPUID[CPUID_88]);
        do_cpuid(0x80000001, p->CPU.CPUID[CPUID_81]);
	}
	else if ((p->CPU.CPUID[CPUID_80][0] & 0x0000000f) >= 1) {
		do_cpuid(0x80000001, p->CPU.CPUID[CPUID_81]);
	}


#if DEBUG_CPU
	{
		int		i;
		printf("CPUID Raw Values:\n");
		for (i=0; i<CPUID_MAX; i++) {
			printf("%02d: %08x-%08x-%08x-%08x\n", i,
                   p->CPU.CPUID[i][0], p->CPU.CPUID[i][1],
                   p->CPU.CPUID[i][2], p->CPU.CPUID[i][3]);
		}
	}
#endif
	/* Decode vendor/signature plus the family/model/stepping bitfields
	 * from CPUID leaf 1 EAX. */
	p->CPU.Vendor		= p->CPU.CPUID[CPUID_0][1];
	p->CPU.Signature	= p->CPU.CPUID[CPUID_1][0];
	p->CPU.Stepping		= bitfield(p->CPU.CPUID[CPUID_1][0], 3, 0);
	p->CPU.Model		= bitfield(p->CPU.CPUID[CPUID_1][0], 7, 4);
	p->CPU.Family		= bitfield(p->CPU.CPUID[CPUID_1][0], 11, 8);
	p->CPU.ExtModel		= bitfield(p->CPU.CPUID[CPUID_1][0], 19, 16);
	p->CPU.ExtFamily	= bitfield(p->CPU.CPUID[CPUID_1][0], 27, 20);
	
	/* Fold the extended model bits into the final model number. */
    p->CPU.Model += (p->CPU.ExtModel << 4);

	/* Core/thread counts: three strategies depending on vendor/model. */
    if (p->CPU.Vendor == CPUID_VENDOR_INTEL && 
        p->CPU.Family == 0x06 && 
        p->CPU.Model >= CPUID_MODEL_NEHALEM && 
        p->CPU.Model != CPUID_MODEL_ATOM		// MSR is *NOT* available on the Intel Atom CPU
        )
    {
        msr = rdmsr64(MSR_CORE_THREAD_COUNT);									// Undocumented MSR in Nehalem and newer CPUs
        p->CPU.NoCores		= bitfield((uint32_t)msr, 31, 16);					// Using undocumented MSR to get actual values
        p->CPU.NoThreads	= bitfield((uint32_t)msr, 15, 0);					// Using undocumented MSR to get actual values
	}
    else if (p->CPU.Vendor == CPUID_VENDOR_AMD)
    {
        /* AMD: logical count from CPUID 1 EBX[23:16],
         * core count from CPUID 0x80000008 ECX[7:0] + 1. */
        p->CPU.NoThreads	= bitfield(p->CPU.CPUID[CPUID_1][1], 23, 16);
        p->CPU.NoCores		= bitfield(p->CPU.CPUID[CPUID_88][2], 7, 0) + 1;
    }
    else
    {
        p->CPU.NoThreads	= bitfield(p->CPU.CPUID[CPUID_1][1], 23, 16);		// Use previous method for Cores and Threads
        p->CPU.NoCores		= bitfield(p->CPU.CPUID[CPUID_4][0], 31, 26) + 1;
	}
	
	/* get brand string (if supported) */
	/* Copyright: from Apple's XNU cpuid.c */
	if (p->CPU.CPUID[CPUID_80][0] > 0x80000004) {
		uint32_t	reg[4];
        char str[128], *s;
		/*
		 * The brand string 48 bytes (max), guaranteed to
		 * be NULL terminated.
		 */
		do_cpuid(0x80000002, reg);
		bcopy((char *)reg, &str[0], 16);
		do_cpuid(0x80000003, reg);
		bcopy((char *)reg, &str[16], 16);
		do_cpuid(0x80000004, reg);
		bcopy((char *)reg, &str[32], 16);
		/* Skip leading spaces in the brand string. */
		for (s = str; *s != '\0'; s++) {
			if (*s != ' ') break;
		}
		
		strlcpy(p->CPU.BrandString,	s, sizeof(p->CPU.BrandString));
		
		if (!strncmp(p->CPU.BrandString, CPU_STRING_UNKNOWN, MIN(sizeof(p->CPU.BrandString), strlen(CPU_STRING_UNKNOWN) + 1))) {
            /*
             * This string means we have a firmware-programmable brand string,
             * and the firmware couldn't figure out what sort of CPU we have.
             */
            p->CPU.BrandString[0] = '\0';
        }
	}
	
	/* setup features: translate CPUID leaf 1 EDX/ECX feature bits into
	 * the bootloader's CPU_FEATURE_* flags. */
	if ((bit(23) & p->CPU.CPUID[CPUID_1][3]) != 0) {
		p->CPU.Features |= CPU_FEATURE_MMX;
	}
	if ((bit(25) & p->CPU.CPUID[CPUID_1][3]) != 0) {
		p->CPU.Features |= CPU_FEATURE_SSE;
	}
	if ((bit(26) & p->CPU.CPUID[CPUID_1][3]) != 0) {
		p->CPU.Features |= CPU_FEATURE_SSE2;
	}
	if ((bit(0) & p->CPU.CPUID[CPUID_1][2]) != 0) {
		p->CPU.Features |= CPU_FEATURE_SSE3;
	}
	if ((bit(19) & p->CPU.CPUID[CPUID_1][2]) != 0) {
		p->CPU.Features |= CPU_FEATURE_SSE41;
	}
	if ((bit(20) & p->CPU.CPUID[CPUID_1][2]) != 0) {
		p->CPU.Features |= CPU_FEATURE_SSE42;
	}
	if ((bit(29) & p->CPU.CPUID[CPUID_81][3]) != 0) {
		p->CPU.Features |= CPU_FEATURE_EM64T;
	}
	if ((bit(5) & p->CPU.CPUID[CPUID_1][3]) != 0) {
		p->CPU.Features |= CPU_FEATURE_MSR;
	}
	//if ((bit(28) & p->CPU.CPUID[CPUID_1][3]) != 0) {
	/* HTT is inferred from thread count > core count rather than the
	 * CPUID HTT bit (the original check is commented out above). */
	if (p->CPU.NoThreads > p->CPU.NoCores) {
		p->CPU.Features |= CPU_FEATURE_HTT;
	}

	tscFrequency = measure_tsc_frequency();
	fsbFrequency = 0;
	cpuFrequency = 0;

	/* Intel frequency detection (family 6 and family 15 only). */
	if ((p->CPU.Vendor == CPUID_VENDOR_INTEL) && ((p->CPU.Family == 0x06) || (p->CPU.Family == 0x0f))) {
		int intelCPU = p->CPU.Model;
		if ((p->CPU.Family == 0x06 && p->CPU.Model >= 0x0c) || (p->CPU.Family == 0x0f && p->CPU.Model >= 0x03)) {
			/* Nehalem CPU model */
			if (p->CPU.Family == 0x06 && (p->CPU.Model == CPU_MODEL_NEHALEM || 
                                          p->CPU.Model == CPU_MODEL_FIELDS || 
                                          p->CPU.Model == CPU_MODEL_DALES || 
                                          p->CPU.Model == CPU_MODEL_DALES_32NM || 
                                          p->CPU.Model == CPU_MODEL_WESTMERE ||
                                          p->CPU.Model == CPU_MODEL_NEHALEM_EX ||
                                          p->CPU.Model == CPU_MODEL_WESTMERE_EX ||
                                          p->CPU.Model == CPU_MODEL_SANDY ||
                                          p->CPU.Model == CPU_MODEL_SANDY_XEON)) {
				msr = rdmsr64(MSR_PLATFORM_INFO);
                DBG("msr(%d): platform_info %08x\n", __LINE__, bitfield(msr, 31, 0));
                /* PLATFORM_INFO bits 14:8 = maximum non-turbo bus ratio. */
                bus_ratio_max = bitfield(msr, 14, 8);
                bus_ratio_min = bitfield(msr, 46, 40); //valv: not sure about this one (Remarq.1)
				msr = rdmsr64(MSR_FLEX_RATIO);
                DBG("msr(%d): flex_ratio %08x\n", __LINE__, bitfield(msr, 31, 0));
                /* Bit 16 set: a flex (overclock-limit) ratio is programmed. */
                if (bitfield(msr, 16, 16)) {
                    flex_ratio = bitfield(msr, 14, 8);
					/* bcc9: at least on the gigabyte h67ma-ud2h,
                     where the cpu multipler can't be changed to
                     allow overclocking, the flex_ratio msr has unexpected (to OSX)
                     contents. These contents cause mach_kernel to
                     fail to compute the bus ratio correctly, instead
                     causing the system to crash since tscGranularity
                     is inadvertently set to 0.
                     */
					if (flex_ratio == 0) {
						/* Clear bit 16 (evidently the
                         presence bit) */
						wrmsr64(MSR_FLEX_RATIO, (msr & 0xFFFFFFFFFFFEFFFFULL));
						msr = rdmsr64(MSR_FLEX_RATIO);
                        verbose("Unusable flex ratio detected. Patched MSR now %08x\n", bitfield(msr, 31, 0));
					} else {
						if (bus_ratio_max > flex_ratio) {
							bus_ratio_max = flex_ratio;
						}
					}
				}

				/* BCLK = TSC frequency divided by the max bus ratio. */
				if (bus_ratio_max) {
					fsbFrequency = (tscFrequency / bus_ratio_max);
				}
				//valv: Turbo Ratio Limit
				if ((intelCPU != 0x2e) && (intelCPU != 0x2f)) {
					msr = rdmsr64(MSR_TURBO_RATIO_LIMIT);
					cpuFrequency = bus_ratio_max * fsbFrequency;
					/* max_ratio is kept in tenths so half-ratios (x.5) fit. */
					max_ratio = bus_ratio_max * 10;
				} else {
					cpuFrequency = tscFrequency;
				}
				/* Allow the user to override the bus ratio from the boot
				 * config (e.g. "busratio=85" meaning 8.5). */
				if ((getValueForKey(kbusratio, &newratio, &len, &bootInfo->chameleonConfig)) && (len <= 4)) {
					max_ratio = atoi(newratio);
					max_ratio = (max_ratio * 10);
					if (len >= 3) max_ratio = (max_ratio + 5);

					verbose("Bus-Ratio: min=%d, max=%s\n", bus_ratio_min, newratio);

					// extreme overclockers may love 320 ;)
					if ((max_ratio >= min_ratio) && (max_ratio <= 320)) {
						cpuFrequency = (fsbFrequency * max_ratio) / 10;
						if (len >= 3) maxdiv = 1;
						else maxdiv = 0;
					} else {
						max_ratio = (bus_ratio_max * 10);
					}
				}
				//valv: to be uncommented if Remarq.1 didn't stick
				/*if(bus_ratio_max > 0) bus_ratio = flex_ratio;*/
				p->CPU.MaxRatio = max_ratio;
				p->CPU.MinRatio = min_ratio;

				myfsb = fsbFrequency / 1000000;
				verbose("Sticking with [BCLK: %dMhz, Bus-Ratio: %d]\n", myfsb, max_ratio);
				currcoef = bus_ratio_max;
			} else {
				/* Pre-Nehalem: multipliers come from IA32_PERF_STATUS. */
				msr = rdmsr64(MSR_IA32_PERF_STATUS);
                DBG("msr(%d): ia32_perf_stat 0x%08x\n", __LINE__, bitfield(msr, 31, 0));
                currcoef = bitfield(msr, 12, 8);
				/* Non-integer bus ratio for the max-multi*/
                maxdiv = bitfield(msr, 46, 46);
				/* Non-integer bus ratio for the current-multi (undocumented)*/
                currdiv = bitfield(msr, 14, 14);

				if ((p->CPU.Family == 0x06 && p->CPU.Model >= 0x0e) || (p->CPU.Family == 0x0f)) // This will always be model >= 3
				{
					/* On these models, maxcoef defines TSC freq */
                    maxcoef = bitfield(msr, 44, 40);
				} else {
					/* On lower models, currcoef defines TSC freq */
					/* XXX */
					maxcoef = currcoef;
				}

				if (maxcoef) {
					/* A set div bit means the real ratio is coef + 0.5,
					 * hence the *2 / (2*coef + 1) arithmetic. */
					if (maxdiv) {
						fsbFrequency = ((tscFrequency * 2) / ((maxcoef * 2) + 1));
					} else {
						fsbFrequency = (tscFrequency / maxcoef);
					}
					if (currdiv) {
						cpuFrequency = (fsbFrequency * ((currcoef * 2) + 1) / 2);
					} else {
						cpuFrequency = (fsbFrequency * currcoef);
					}
					DBG("max: %d%s current: %d%s\n", maxcoef, maxdiv ? ".5" : "",currcoef, currdiv ? ".5" : "");
				}
			}
		}
		/* Mobile CPU */
		if (rdmsr64(MSR_IA32_PLATFORM_ID) & (1<<28)) {
			p->CPU.Features |= CPU_FEATURE_MOBILE;
		}
	}
	/* AMD family 0x0f frequency detection, keyed on extended family. */
	else if((p->CPU.Vendor == CPUID_VENDOR_AMD) && (p->CPU.Family == 0x0f))
    {
        switch(p->CPU.ExtFamily)
        {
            case 0x00: /* K8 */
                /* NOTE(review): FID encoding assumed to be fid/2 + 4 —
                 * verify against the AMD K8 BKDG. */
                msr = rdmsr64(K8_FIDVID_STATUS);
                maxcoef = bitfield(msr, 21, 16) / 2 + 4;
                currcoef = bitfield(msr, 5, 0) / 2 + 4;
                break;

            case 0x01: /* K10 */
                msr = rdmsr64(K10_COFVID_STATUS);
                do_cpuid2(0x00000006, 0, p->CPU.CPUID[CPUID_6]);
                if(bitfield(p->CPU.CPUID[CPUID_6][2], 0, 0) == 1) // EffFreq: effective frequency interface
                {
                    //uint64_t mperf = measure_mperf_frequency();
                    uint64_t aperf = measure_aperf_frequency();
                    cpuFrequency = aperf;
                }
                // NOTE: tsc runs at the maxcoef (non turbo)
                //	*not* at the turbo frequency.
                maxcoef  = bitfield(msr, 54, 49) / 2 + 4;
                currcoef = bitfield(msr, 5, 0) + 0x10;
                /* NOTE(review): divisor encoded as 2 << DID — confirm
                 * against the AMD family 10h BKDG. */
                currdiv = 2 << bitfield(msr, 8, 6);

                break;

            case 0x05: /* K14 */
                msr = rdmsr64(K10_COFVID_STATUS);
                currcoef  = (bitfield(msr, 54, 49) + 0x10) << 2;
                currdiv = (bitfield(msr, 8, 4) + 1) << 2;
                currdiv += bitfield(msr, 3, 0);

                break;

            case 0x02: /* K11 */
                // not implemented
                break;
        }

        /* Derive the FSB from whichever frequency we trust most. */
        if (maxcoef)
        {
            if (currdiv)
            {
                if(!currcoef) currcoef = maxcoef;
                if(!cpuFrequency)
                    fsbFrequency = ((tscFrequency * currdiv) / currcoef);
                else 
                    fsbFrequency = ((cpuFrequency * currdiv) / currcoef);

                DBG("%d.%d\n", currcoef / currdiv, ((currcoef % currdiv) * 100) / currdiv);
            } else {
                if(!cpuFrequency)
                    fsbFrequency = (tscFrequency / maxcoef);
                else 
                    fsbFrequency = (cpuFrequency / maxcoef);
                DBG("%d\n", currcoef);
            }
        }
        else if (currcoef)
        {
            if (currdiv)
            {
                fsbFrequency = ((tscFrequency * currdiv) / currcoef);
                DBG("%d.%d\n", currcoef / currdiv, ((currcoef % currdiv) * 100) / currdiv);
            } else {
                fsbFrequency = (tscFrequency / currcoef);
                DBG("%d\n", currcoef);
            }
        }
        /* Fall back: without EffFreq the CPU clock equals the TSC rate. */
        if(!cpuFrequency) cpuFrequency = tscFrequency;
    }
#if 0
	if (!fsbFrequency) {
		fsbFrequency = (DEFAULT_FSB * 1000);
		cpuFrequency = tscFrequency;
		DBG("0 ! using the default value for FSB !\n");
	}
#endif

	/* Publish everything we derived into the platform record. */
	p->CPU.MaxCoef = maxcoef;
	p->CPU.MaxDiv = maxdiv;
	p->CPU.CurrCoef = currcoef;
	p->CPU.CurrDiv = currdiv;
	p->CPU.TSCFrequency = tscFrequency;
	p->CPU.FSBFrequency = fsbFrequency;
	p->CPU.CPUFrequency = cpuFrequency;

	DBG("CPU: Brand String: %s\n",				p->CPU.BrandString);
	DBG("CPU: Vendor/Family/ExtFamily: 0x%x/0x%x/0x%x\n",	p->CPU.Vendor, p->CPU.Family, p->CPU.ExtFamily);
	DBG("CPU: Model/ExtModel/Stepping: 0x%x/0x%x/0x%x\n",	p->CPU.Model, p->CPU.ExtModel, p->CPU.Stepping);
	DBG("CPU: MaxCoef/CurrCoef: 0x%x/0x%x\n",		p->CPU.MaxCoef, p->CPU.CurrCoef);
	DBG("CPU: MaxDiv/CurrDiv: 0x%x/0x%x\n",		p->CPU.MaxDiv, p->CPU.CurrDiv);
	/* NOTE(review): %d paired with 64-bit frequency values is a format
	 * mismatch; consider %llu (or casting) if these prints misbehave. */
	DBG("CPU: TSCFreq: %dMHz\n",			p->CPU.TSCFrequency / 1000000);
	DBG("CPU: FSBFreq: %dMHz\n",			p->CPU.FSBFrequency / 1000000);
	DBG("CPU: CPUFreq: %dMHz\n",			p->CPU.CPUFrequency / 1000000);
	DBG("CPU: NoCores/NoThreads: %d/%d\n",			p->CPU.NoCores, p->CPU.NoThreads);
	DBG("CPU: Features: 0x%08x\n",			p->CPU.Features);
#if DEBUG_CPU
	pause();
#endif
}
556 | |