Chameleon Svn Source Tree

Root/branches/slice/trunkM/i386/libsaio/cpu.c

/*
 * Copyright 2008 Islam Ahmed Zaid. All rights reserved. <azismed@gmail.com>
 * AsereBLN: 2009: cleanup and bugfix
 */

#include "libsaio.h"
#include "platform.h"
//#include "mem.h"
#include "smbios_getters.h"
#include "cpu.h"
#include "bootstruct.h"
#include "boot.h"

#ifndef DEBUG_CPU
#define DEBUG_CPU 0
#endif

#if DEBUG_CPU
#define DBG(x...) printf(x)
#else
#define DBG(x...) msglog(x)
#endif

/*
 * DFE: Measures the TSC frequency in Hz (64-bit) using the 8254 PIT
 */
static uint64_t measure_tsc_frequency(void)
{
    uint64_t tscStart;
    uint64_t tscEnd;
    uint64_t tscDelta = 0xffffffffffffffffULL;
    unsigned long pollCount;
    uint64_t retval = 0;
    int i;

    /* Time how many TSC ticks elapse in 30 msec using the 8254 PIT
     * counter 2. We run this loop 10 times to make sure the cache
     * is hot and we take the minimum delta from all of the runs.
     * That is to say that we're biased towards measuring the minimum
     * number of TSC ticks that occur while waiting for the timer to
     * expire. That theoretically helps avoid inconsistencies when
     * running under a VM if the TSC is not virtualized and the host
     * steals time. The TSC is normally virtualized for VMware.
     */
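    /* CALIBRATE_TIME_MSEC and CALIBRATE_LATCH are defined in cpu.h; the
     * latch is presumably the PIT tick count matching the 30 ms window,
     * i.e. about 1193182 Hz * 30 / 1000 = 35795 PIT ticks, given the
     * PIT's fixed 1.193182 MHz input clock.
     */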
    for (i = 0; i < 10; ++i)
    {
        enable_PIT2();
        set_PIT2_mode0(CALIBRATE_LATCH);
        tscStart = rdtsc64();
        pollCount = poll_PIT2_gate();
        tscEnd = rdtsc64();
        /* The poll loop must have run at least a few times for accuracy */
        if (pollCount <= 1)
            continue;
        /* The TSC must increment at LEAST once every millisecond. We
         * should have waited exactly 30 msec so the TSC delta should
         * be >= 30. Anything less and the processor is way too slow.
         */
        if ((tscEnd - tscStart) <= CALIBRATE_TIME_MSEC)
            continue;
        // tscDelta = MIN(tscDelta, (tscEnd - tscStart))
        if ((tscEnd - tscStart) < tscDelta)
            tscDelta = tscEnd - tscStart;
    }
    /* tscDelta is now the least number of TSC ticks the processor made in
     * a timespan of 0.03 s (i.e. 30 milliseconds).
     * Linux divides by 30, which gives the answer in kilohertz because
     * 1 / ms = kHz. But we're xnu and most of the rest of the code uses
     * Hz, so we need to convert our milliseconds to seconds. Since we're
     * dividing by the milliseconds, we simply multiply by 1000.
     */

    /* Unlike Linux, we're not limited to 32-bit, but we do need to take care
     * that we're going to multiply by 1000 first, so we do need at least some
     * arithmetic headroom. For now, 32-bit should be enough.
     * Also unlike Linux, our compiler can do 64-bit integer arithmetic.
     */
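    /* Worked example: a 2.66 GHz TSC advances roughly 79,800,000 ticks in
     * 30 ms, so retval = 79,800,000 * 1000 / 30 = 2,660,000,000 Hz.
     */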
    if (tscDelta > (1ULL << 32))
        retval = 0;
    else
    {
        retval = tscDelta * 1000 / 30;
    }
    disable_PIT2();
    return retval;
}

#if 0
/*
 * DFE: Measures the Max Performance Frequency in Hz (64-bit)
 */
static uint64_t measure_mperf_frequency(void)
{
    uint64_t mperfStart;
    uint64_t mperfEnd;
    uint64_t mperfDelta = 0xffffffffffffffffULL;
    unsigned long pollCount;
    uint64_t retval = 0;
    int i;

    /* Time how many MPERF ticks elapse in 30 msec using the 8254 PIT
     * counter 2. We run this loop 10 times to make sure the cache
     * is hot and we take the minimum delta from all of the runs.
     * That is to say that we're biased towards measuring the minimum
     * number of MPERF ticks that occur while waiting for the timer to
     * expire.
     */
    for (i = 0; i < 10; ++i)
    {
        enable_PIT2();
        set_PIT2_mode0(CALIBRATE_LATCH);
        mperfStart = rdmsr64(MSR_AMD_MPERF);
        pollCount = poll_PIT2_gate();
        mperfEnd = rdmsr64(MSR_AMD_MPERF);
        /* The poll loop must have run at least a few times for accuracy */
        if (pollCount <= 1)
            continue;
        /* The MPERF must increment at LEAST once every millisecond. We
         * should have waited exactly 30 msec so the MPERF delta should
         * be >= 30. Anything less and the processor is way too slow.
         */
        if ((mperfEnd - mperfStart) <= CALIBRATE_TIME_MSEC)
            continue;
        // mperfDelta = MIN(mperfDelta, (mperfEnd - mperfStart))
        if ((mperfEnd - mperfStart) < mperfDelta)
            mperfDelta = mperfEnd - mperfStart;
    }
    /* mperfDelta is now the least number of MPERF ticks the processor made
     * in a timespan of 0.03 s (i.e. 30 milliseconds)
     */

    if (mperfDelta > (1ULL << 32))
        retval = 0;
    else
    {
        retval = mperfDelta * 1000 / 30;
    }
    disable_PIT2();
    return retval;
}
#endif
/*
 * Measures the Actual Performance Frequency in Hz (64-bit)
 */
#if 0
static uint64_t measure_aperf_frequency(void)
{
    uint64_t aperfStart;
    uint64_t aperfEnd;
    uint64_t aperfDelta = 0xffffffffffffffffULL;
    unsigned long pollCount;
    uint64_t retval = 0;
    int i;

    /* Time how many APERF ticks elapse in 30 msec using the 8254 PIT
     * counter 2. We run this loop 10 times to make sure the cache
     * is hot and we take the minimum delta from all of the runs.
     * That is to say that we're biased towards measuring the minimum
     * number of APERF ticks that occur while waiting for the timer to
     * expire.
     */
    for (i = 0; i < 10; ++i)
    {
        enable_PIT2();
        set_PIT2_mode0(CALIBRATE_LATCH);
        aperfStart = rdmsr64(MSR_AMD_APERF);
        pollCount = poll_PIT2_gate();
        aperfEnd = rdmsr64(MSR_AMD_APERF);
        /* The poll loop must have run at least a few times for accuracy */
        if (pollCount <= 1)
            continue;
        /* The APERF must increment at LEAST once every millisecond. We
         * should have waited exactly 30 msec so the APERF delta should
         * be >= 30. Anything less and the processor is way too slow.
         */
        if ((aperfEnd - aperfStart) <= CALIBRATE_TIME_MSEC)
            continue;
        // aperfDelta = MIN(aperfDelta, (aperfEnd - aperfStart))
        if ((aperfEnd - aperfStart) < aperfDelta)
            aperfDelta = aperfEnd - aperfStart;
    }
    /* aperfDelta is now the least number of APERF ticks the processor made
     * in a timespan of 0.03 s (i.e. 30 milliseconds)
     */

    if (aperfDelta > (1ULL << 32))
        retval = 0;
    else
    {
        retval = aperfDelta * 1000 / 30;
    }
    disable_PIT2();
    return retval;
}
#endif

/*
 * Calculates the FSB and CPU frequencies using specific MSRs for each CPU
 * - The multiplier is read from a CPU-specific MSR. On Intel there are both
 *   a maximum multiplier (used to calculate the FSB frequency)
 *   and a current multiplier (used to calculate the CPU frequency)
 * - fsbFrequency = tscFrequency / multiplier
 * - cpuFrequency = fsbFrequency * multiplier
 */
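/* Worked example (hypothetical values): with tscFrequency measured at
 * 2,660,000,000 Hz and a maximum multiplier of 20, fsbFrequency =
 * 2,660,000,000 / 20 = 133 MHz; with a current multiplier of 18,
 * cpuFrequency = 133,000,000 * 18 = 2,394,000,000 Hz.
 */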

void scan_cpu(PlatformInfo_t *p)
{
    uint64_t tscFrequency, fsbFrequency, cpuFrequency;
    uint64_t msr; //, flex_ratio;
    uint8_t maxcoef, maxdiv, currcoef, bus_ratio_max, currdiv;
    //const char *newratio;
    int /*len,*/ myfsb;
    uint8_t bus_ratio_min;
    uint32_t max_ratio, min_ratio;

    max_ratio = min_ratio = myfsb = bus_ratio_min = 0;
    maxcoef = maxdiv = bus_ratio_max = currcoef = currdiv = 0;

    /* get cpuid values */
    do_cpuid(0x00000000, p->CPU.CPUID[CPUID_0]);
    do_cpuid(0x00000001, p->CPU.CPUID[CPUID_1]);
    do_cpuid(0x00000002, p->CPU.CPUID[CPUID_2]);
    do_cpuid(0x00000003, p->CPU.CPUID[CPUID_3]);
    do_cpuid2(0x00000004, 0, p->CPU.CPUID[CPUID_4]);
    do_cpuid(0x80000000, p->CPU.CPUID[CPUID_80]);
    if ((p->CPU.CPUID[CPUID_80][0] & 0x0000000f) >= 8) {
        do_cpuid(0x80000008, p->CPU.CPUID[CPUID_88]);
        do_cpuid(0x80000001, p->CPU.CPUID[CPUID_81]);
    }
    else if ((p->CPU.CPUID[CPUID_80][0] & 0x0000000f) >= 1) {
        do_cpuid(0x80000001, p->CPU.CPUID[CPUID_81]);
    }

#if DEBUG_CPU
    {
        int i;
        printf("CPUID Raw Values:\n");
        for (i = 0; i < CPUID_MAX; i++) {
            printf("%02d: %08x-%08x-%08x-%08x\n", i,
                   p->CPU.CPUID[i][0], p->CPU.CPUID[i][1],
                   p->CPU.CPUID[i][2], p->CPU.CPUID[i][3]);
        }
    }
    getchar();
#endif
    p->CPU.Vendor    = p->CPU.CPUID[CPUID_0][1];
    p->CPU.Signature = p->CPU.CPUID[CPUID_1][0];
    p->CPU.Stepping  = bitfield(p->CPU.CPUID[CPUID_1][0], 3, 0);
    p->CPU.Model     = bitfield(p->CPU.CPUID[CPUID_1][0], 7, 4);
    p->CPU.Family    = bitfield(p->CPU.CPUID[CPUID_1][0], 11, 8);
    p->CPU.ExtModel  = bitfield(p->CPU.CPUID[CPUID_1][0], 19, 16);
    p->CPU.ExtFamily = bitfield(p->CPU.CPUID[CPUID_1][0], 27, 20);

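    /* Intel's CPUID convention: the displayed model number combines the
     * extended-model and model fields (meaningful for family 0x6 and 0xf);
     * this code applies the combination unconditionally.
     */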
    p->CPU.Model += (p->CPU.ExtModel << 4);
#if DEBUG_CPU
    printf("Enter cpuid_info\n");
    getchar();
#endif

    if (p->CPU.Vendor == CPUID_VENDOR_INTEL &&
        p->CPU.Family == 0x06 &&
        p->CPU.Model >= CPUID_MODEL_NEHALEM &&
        p->CPU.Model != CPUID_MODEL_ATOM // MSR is *NOT* available on the Intel Atom CPU
        )
    {
        msr = rdmsr64(MSR_CORE_THREAD_COUNT); // Undocumented MSR in Nehalem and newer CPUs
        p->CPU.NoCores   = bitfield((uint32_t)msr, 31, 16); // Using undocumented MSR to get actual values
        p->CPU.NoThreads = bitfield((uint32_t)msr, 15, 0);  // Using undocumented MSR to get actual values
    }
    else if (p->CPU.Vendor == CPUID_VENDOR_AMD)
    {
        p->CPU.NoThreads = bitfield(p->CPU.CPUID[CPUID_1][1], 23, 16);
        p->CPU.NoCores   = bitfield(p->CPU.CPUID[CPUID_88][2], 7, 0) + 1;
    }
    else
    {
        p->CPU.NoThreads = bitfield(p->CPU.CPUID[CPUID_1][1], 23, 16); // Use previous method for Cores and Threads
        p->CPU.NoCores   = bitfield(p->CPU.CPUID[CPUID_4][0], 31, 26) + 1;
    }
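    /* Note on the paths above: CPUID leaf 1 EBX[23:16] gives the logical
     * processor count, and Intel's leaf 4 EAX[31:26] / AMD's leaf 0x80000008
     * ECX[7:0] give (core count - 1), hence the "+ 1".
     */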
#if DEBUG_CPU
    printf("...OK\n");
    getchar();
#endif

    /* get brand string (if supported) */
    /* Copyright: from Apple's XNU cpuid.c */
    if (p->CPU.CPUID[CPUID_80][0] > 0x80000004) {
        uint32_t reg[4];
        char str[128], *s;
        /*
         * The brand string is 48 bytes (max), guaranteed to
         * be NUL terminated.
         */
        do_cpuid(0x80000002, reg);
        bcopy((char *)reg, &str[0], 16);
        do_cpuid(0x80000003, reg);
        bcopy((char *)reg, &str[16], 16);
        do_cpuid(0x80000004, reg);
        bcopy((char *)reg, &str[32], 16);
        for (s = str; *s != '\0'; s++) {
            if (*s != ' ') break;
        }

        strlcpy(p->CPU.BrandString, s, sizeof(p->CPU.BrandString));

        if (!strncmp(p->CPU.BrandString, CPU_STRING_UNKNOWN, MIN(sizeof(p->CPU.BrandString), strlen(CPU_STRING_UNKNOWN) + 1))) {
            /*
             * This string means we have a firmware-programmable brand string,
             * and the firmware couldn't figure out what sort of CPU we have.
             */
            p->CPU.BrandString[0] = '\0';
        }
    }

    /* setup features */
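    /* do_cpuid stores {eax, ebx, ecx, edx}, so index 3 below is EDX of
     * leaf 1 (MMX/SSE/SSE2/MSR bits) and index 2 is ECX of leaf 1
     * (SSE3/SSE4.x bits); EM64T comes from EDX of leaf 0x80000001.
     */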
    if ((bit(23) & p->CPU.CPUID[CPUID_1][3]) != 0) {
        p->CPU.Features |= CPU_FEATURE_MMX;
    }
    if ((bit(25) & p->CPU.CPUID[CPUID_1][3]) != 0) {
        p->CPU.Features |= CPU_FEATURE_SSE;
    }
    if ((bit(26) & p->CPU.CPUID[CPUID_1][3]) != 0) {
        p->CPU.Features |= CPU_FEATURE_SSE2;
    }
    if ((bit(0) & p->CPU.CPUID[CPUID_1][2]) != 0) {
        p->CPU.Features |= CPU_FEATURE_SSE3;
    }
    if ((bit(19) & p->CPU.CPUID[CPUID_1][2]) != 0) {
        p->CPU.Features |= CPU_FEATURE_SSE41;
    }
    if ((bit(20) & p->CPU.CPUID[CPUID_1][2]) != 0) {
        p->CPU.Features |= CPU_FEATURE_SSE42;
    }
    if ((bit(29) & p->CPU.CPUID[CPUID_81][3]) != 0) {
        p->CPU.Features |= CPU_FEATURE_EM64T;
    }
    if ((bit(5) & p->CPU.CPUID[CPUID_1][3]) != 0) {
        p->CPU.Features |= CPU_FEATURE_MSR;
    }
    //if ((bit(28) & p->CPU.CPUID[CPUID_1][3]) != 0) {
    if (p->CPU.NoThreads > p->CPU.NoCores) {
        p->CPU.Features |= CPU_FEATURE_HTT;
    }

    tscFrequency = measure_tsc_frequency();
    fsbFrequency = 0;
    cpuFrequency = tscFrequency;
    msr = rdmsr64(MSR_IA32_PERF_STATUS);
    DBG("msr(0x%x): ia32_perf_stat 0x%08x\n", MSR_IA32_PERF_STATUS, bitfield(msr, 31, 0));
    currcoef = bitfield(msr, 7, 0);
    //if (currcoef) {
    //    fsbFrequency = cpuFrequency / currcoef;
    //} else {
    fsbFrequency = 133 * 1000000;
    //}
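    /* Note: with the multiplier path above commented out, the bus clock is
     * hard-coded to 133 MHz and cpuFrequency stays at the measured TSC
     * frequency, whatever the actual bus ratio is.
     */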

#if 0
    if ((p->CPU.Vendor == CPUID_VENDOR_INTEL) && ((p->CPU.Family == 0x06)
        || (p->CPU.Family == 0x0f)))
    {
        //int intelCPU = p->CPU.Model;
        if ((p->CPU.Family == 0x06 && p->CPU.Model >= 0x0c) || (p->CPU.Family == 0x0f && p->CPU.Model >= 0x03))
        {
            /* Nehalem CPU model */
            if (p->CPU.Family == 0x06 && (p->CPU.Model == CPU_MODEL_NEHALEM ||
                                          p->CPU.Model == CPU_MODEL_FIELDS ||
                                          p->CPU.Model == CPU_MODEL_DALES ||
                                          p->CPU.Model == CPU_MODEL_DALES_32NM ||
                                          p->CPU.Model == CPU_MODEL_WESTMERE ||
                                          p->CPU.Model == CPU_MODEL_NEHALEM_EX ||
                                          p->CPU.Model == CPU_MODEL_WESTMERE_EX ||
                                          p->CPU.Model == CPU_MODEL_SANDY ||
                                          p->CPU.Model == CPU_MODEL_SANDY_XEON))
            {
                msr = rdmsr64(MSR_PLATFORM_INFO);
                DBG("msr(0x%04x): platform_info %08x-%08x\n", MSR_PLATFORM_INFO,
                    (msr >> 32) & 0xffffffff, msr & 0xffffffff);
                msr = rdmsr64(MSR_IA32_PERF_STATUS);
                DBG("msr(0x%x): ia32_perf_stat 0x%08x\n", MSR_IA32_PERF_STATUS, bitfield(msr, 31, 0));
                currcoef = bitfield(msr, 7, 0);
                if (currcoef) {
                    fsbFrequency = cpuFrequency / currcoef;
                } else {
                    fsbFrequency = 133 * 1000000;
                }
                DBG("initial values: FSB=%d CPU=%d\n", fsbFrequency, cpuFrequency);
#if DEBUG_CPU
                getchar();
#endif

#if 0
                bus_ratio_max = bitfield(msr, 14, 8);
                bus_ratio_min = bitfield(msr, 46, 40); //valv: not sure about this one (Remarq.1)
                //msr = rdmsr64(MSR_FLEX_RATIO);
                msr = 0;
                DBG("msr(0x%04x): flex_ratio %08x\n", MSR_FLEX_RATIO, msr & 0xffffffff);
                if ((msr >> 16) & 0x01) {
                    flex_ratio = bitfield(msr, 14, 8);
                    /* bcc9: at least on the gigabyte h67ma-ud2h,
                     * where the cpu multiplier can't be changed to
                     * allow overclocking, the flex_ratio msr has unexpected
                     * (to OSX) contents. These contents cause mach_kernel to
                     * fail to compute the bus ratio correctly, instead
                     * causing the system to crash since tscGranularity
                     * is inadvertently set to 0.
                     */
                    if (flex_ratio == 0) {
                        /* Clear bit 16 (evidently the presence bit) */
                        //wrmsr64(MSR_FLEX_RATIO, (msr & 0xFFFFFFFFFFFEFFFFULL));
                        //msr = rdmsr64(MSR_FLEX_RATIO);
                        DBG("Unusable flex ratio detected. Patched MSR now %08x\n", bitfield(msr, 31, 0));
                    } else {
                        if (bus_ratio_max > flex_ratio) {
                            bus_ratio_max = flex_ratio;
                        }
                    }
                }

                /*if (bus_ratio_max) {
                    fsbFrequency = (tscFrequency / bus_ratio_max);
                } */
                //valv: Turbo Ratio Limit
                /*if ((intelCPU != 0x2e) && (intelCPU != 0x2f)) {
                    msr = rdmsr64(MSR_TURBO_RATIO_LIMIT);
                    cpuFrequency = bus_ratio_max * fsbFrequency;
                    max_ratio = bus_ratio_max * 10;
                } else */
                //{
                cpuFrequency = tscFrequency;
                //}
#endif
                /*if ((getValueForKey(kbusratio, &newratio, &len, &bootInfo->chameleonConfig)) && (len <= 4)) {
                    max_ratio = atoi(newratio);
                    max_ratio = (max_ratio * 10);
                    if (len >= 3) max_ratio = (max_ratio + 5);

                    verbose("Bus-Ratio: min=%d, max=%s\n", bus_ratio_min, newratio);

                    // extreme overclockers may love 320 ;)
                    if ((max_ratio >= min_ratio) && (max_ratio <= 320)) {
                        cpuFrequency = (fsbFrequency * max_ratio) / 10;
                        if (len >= 3) maxdiv = 1;
                        else maxdiv = 0;
                    } else {
                        max_ratio = (bus_ratio_max * 10);
                    }
                } else {
                    max_ratio = currcoef;
                }
                */
                max_ratio = currcoef;
                min_ratio = currcoef;
                //valv: to be uncommented if Remarq.1 didn't stick
                /*if(bus_ratio_max > 0) bus_ratio = flex_ratio;*/
                p->CPU.MaxRatio = max_ratio;
                p->CPU.MinRatio = min_ratio;

                //myfsb = fsbFrequency / 1000000;
                DBG("Sticking with [BCLK: %dhz, Bus-Ratio: %d]\n", fsbFrequency, max_ratio);
                currcoef = max_ratio;
#if DEBUG_CPU
                getchar();
#endif

            } else {
                msr = rdmsr64(MSR_IA32_PERF_STATUS);
                DBG("msr(0x%x): ia32_perf_stat 0x%08x\n", MSR_IA32_PERF_STATUS, bitfield(msr, 31, 0));
                currcoef = bitfield(msr, 12, 8);
                /* Non-integer bus ratio for the max-multi */
                maxdiv = bitfield(msr, 46, 46);
                /* Non-integer bus ratio for the current-multi (undocumented) */
                currdiv = bitfield(msr, 14, 14);

                if ((p->CPU.Family == 0x06 && p->CPU.Model >= 0x0e) || (p->CPU.Family == 0x0f)) // This will always be model >= 3
                {
                    /* On these models, maxcoef defines TSC freq */
                    maxcoef = bitfield(msr, 44, 40);
                } else {
                    /* On lower models, currcoef defines TSC freq */
                    /* XXX */
                    maxcoef = currcoef;
                }

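                /* A set maxdiv/currdiv bit means the bus ratio has a 0.5
                 * fractional part, hence the (2n + 1) / 2 arithmetic below:
                 * e.g. a max ratio of 11.5 is encoded as maxcoef 11 with
                 * maxdiv set, giving fsbFrequency = 2 * tscFrequency / 23.
                 */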
                if (maxcoef) {
                    if (maxdiv) {
                        fsbFrequency = ((tscFrequency * 2) / ((maxcoef * 2) + 1));
                    } else {
                        fsbFrequency = (tscFrequency / maxcoef);
                    }
                    if (currdiv) {
                        cpuFrequency = (fsbFrequency * ((currcoef * 2) + 1) / 2);
                    } else {
                        cpuFrequency = (fsbFrequency * currcoef);
                    }
                    DBG("max: %d%s current: %d%s\n", maxcoef, maxdiv ? ".5" : "", currcoef, currdiv ? ".5" : "");
                }
            }
        }
        /* Mobile CPU ? */
        //Slice - no more needed
        /* // DEBUG_CPU
        pause();

        msr = rdmsr64(MSR_IA32_PLATFORM_ID);
        DBG("msr(0x%04x): MSR_IA32_PLATFORM_ID 0x%08x\n", MSR_IA32_PLATFORM_ID, msr & 0xffffffff); //__LINE__ - source line number :)
        if (msr) {
            p->CPU.Mobile = FALSE;
            switch (p->CPU.Model) {
                case 0x0D:
                    p->CPU.Mobile = TRUE; // CPU_FEATURE_MOBILE;
                    break;
                case 0x0F:
                    p->CPU.Mobile = FALSE; // CPU_FEATURE_MOBILE;
                    break;
                case 0x02:
                case 0x03:
                case 0x04:
                case 0x06:
                    p->CPU.Mobile = ((rdmsr64(MSR_P4_EBC_FREQUENCY_ID) & (1 << 21)) != 0);
                    break;
                default:
                    p->CPU.Mobile = ((rdmsr64(MSR_IA32_PLATFORM_ID) & (1 << 28)) != 0);
                    break;
            }
            if (p->CPU.Mobile) {
                p->CPU.Features |= CPU_FEATURE_MOBILE;
            }
        */
        //}
        DBG("CPU is %s\n", p->CPU.Mobile ? "Mobile" : "Desktop");

    }
    else if ((p->CPU.Vendor == CPUID_VENDOR_AMD) && (p->CPU.Family == 0x0f))
    {
        switch (p->CPU.ExtFamily)
        {
            case 0x00: /* K8 */
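                /* The K8 FID encodes the multiplier in half steps starting
                 * at 4x (per AMD's BKDG), hence fid / 2 + 4: e.g. FID 12
                 * decodes to a 10x multiplier.
                 */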
                msr = rdmsr64(K8_FIDVID_STATUS);
                maxcoef = bitfield(msr, 21, 16) / 2 + 4;
                currcoef = bitfield(msr, 5, 0) / 2 + 4;
                break;

            case 0x01: /* K10 */
                msr = rdmsr64(K10_COFVID_STATUS);
                do_cpuid2(0x00000006, 0, p->CPU.CPUID[CPUID_6]);
                if (bitfield(p->CPU.CPUID[CPUID_6][2], 0, 0) == 1) // EffFreq: effective frequency interface
                {
                    //uint64_t mperf = measure_mperf_frequency();
                    uint64_t aperf = measure_aperf_frequency();
                    cpuFrequency = aperf;
                }
                // NOTE: the TSC runs at the maxcoef (non-turbo) frequency,
                // *not* at the turbo frequency.
                maxcoef = bitfield(msr, 54, 49) / 2 + 4;
                currcoef = bitfield(msr, 5, 0) + 0x10;
                currdiv = 2 << bitfield(msr, 8, 6);

                break;

            case 0x05: /* K14 */
                msr = rdmsr64(K10_COFVID_STATUS);
                currcoef = (bitfield(msr, 54, 49) + 0x10) << 2;
                currdiv = (bitfield(msr, 8, 4) + 1) << 2;
                currdiv += bitfield(msr, 3, 0);

                break;

            case 0x02: /* K11 */
                // not implemented
                break;
        }

        if (maxcoef)
        {
            if (currdiv)
            {
                if (!currcoef) currcoef = maxcoef;
                if (!cpuFrequency)
                    fsbFrequency = ((tscFrequency * currdiv) / currcoef);
                else
                    fsbFrequency = ((cpuFrequency * currdiv) / currcoef);

                DBG("%d.%d\n", currcoef / currdiv, ((currcoef % currdiv) * 100) / currdiv);
            } else {
                if (!cpuFrequency)
                    fsbFrequency = (tscFrequency / maxcoef);
                else
                    fsbFrequency = (cpuFrequency / maxcoef);
                DBG("%d\n", currcoef);
            }
        }
        else if (currcoef)
        {
            if (currdiv)
            {
                fsbFrequency = ((tscFrequency * currdiv) / currcoef);
                DBG("%d.%d\n", currcoef / currdiv, ((currcoef % currdiv) * 100) / currdiv);
            } else {
                fsbFrequency = (tscFrequency / currcoef);
                DBG("%d\n", currcoef);
            }
        }
        if (!cpuFrequency) cpuFrequency = tscFrequency;
    }
#endif
#if DEBUG_CPU
    DBG("ready to finish...\n");
    getchar();
#endif

    p->CPU.MaxCoef = maxcoef;
    p->CPU.MaxDiv = maxdiv;
    p->CPU.CurrCoef = currcoef;
    p->CPU.CurrDiv = currdiv;
    p->CPU.TSCFrequency = tscFrequency;
    p->CPU.FSBFrequency = fsbFrequency;
    p->CPU.CPUFrequency = cpuFrequency;
    DBG("CPU: Brand: %s\n", p->CPU.BrandString);
    DBG("CPU: Vendor/Model/ExtModel: 0x%x/0x%x/0x%x\n", p->CPU.Vendor, p->CPU.Model, p->CPU.ExtModel);
    DBG("CPU: Family/ExtFamily: 0x%x/0x%x\n", p->CPU.Family, p->CPU.ExtFamily);
    DBG("CPU: MaxCoef/CurrCoef/Turbo: 0x%x/0x%x/0x%x\n", p->CPU.MaxCoef, p->CPU.CurrCoef, p->CPU.MaxCoef + 1);
    DBG("CPU: MaxDiv/CurrDiv: 0x%x/0x%x\n", p->CPU.MaxDiv ? 2 : 1, p->CPU.CurrDiv ? 2 : 1);
    DBG("CPU: TSCFreq: %dMHz\n", p->CPU.TSCFrequency / 1000000);
    DBG("CPU: FSBFreq: %dMHz\n", p->CPU.FSBFrequency / 1000000);
    DBG("CPU: CPUFreq: %dMHz\n", p->CPU.CPUFrequency / 1000000);
    DBG("CPU: NoCores/NoThreads: %d/%d\n", p->CPU.NoCores, p->CPU.NoThreads);
    DBG("CPU: Features: 0x%08x\n", p->CPU.Features);
#if DEBUG_CPU
    getchar();
#endif
}


Revision: 1290