Chameleon

Chameleon Svn Source Tree

Root/tags/2.1/i386/libsaio/cpu.c

Source at commit 2381 created 10 years 21 days ago.
By ifabio. Apply patch (credits to Thomas Jansen, aka tja): read options from all devices during boot. The boot menu options were previously read only from the devices rd(0,0) or bt(0,0), so boot menu options (e.g. "Quiet Boot", "Timeout", etc.) kept in plists on other devices (where most users keep them) were ignored. This patch extends the list of paths searched for the options plist to all devices that can be found.
/*
 * Copyright 2008 Islam Ahmed Zaid. All rights reserved.  <azismed@gmail.com>
 * AsereBLN: 2009: cleanup and bugfix
 */

#include "libsaio.h"
#include "platform.h"
#include "cpu.h"
#include "bootstruct.h"
#include "boot.h"

#ifndef DEBUG_CPU
#define DEBUG_CPU 0
#endif

#if DEBUG_CPU
#define DBG(x...)    printf(x)
#else
#define DBG(x...)    msglog(x)
#endif

/*
 * timeRDTSC()
 * This routine sets up PIT counter 2 to count down 1/20 of a second.
 * It pauses until the value is latched in the counter
 * and then reads the time stamp counter to return to the caller.
 */
uint64_t timeRDTSC(void)
{
    int          attempts = 0;
    uint64_t     latchTime;
    uint64_t     saveTime, intermediate;
    unsigned int timerValue, lastValue;
    //boolean_t  int_enabled;
    /*
     * Table of correction factors to account for
     *  - timer counter quantization errors, and
     *  - undercounts 0..5
     */
#define SAMPLE_CLKS_EXACT   (((double) CLKNUM) / 20.0)
#define SAMPLE_CLKS_INT     ((int) CLKNUM / 20)
#define SAMPLE_NSECS        (2000000000LL)
#define SAMPLE_MULTIPLIER   (((double)SAMPLE_NSECS)*SAMPLE_CLKS_EXACT)
#define ROUND64(x)          ((uint64_t)((x) + 0.5))
    uint64_t scale[6] = {
        ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-0)),
        ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-1)),
        ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-2)),
        ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-3)),
        ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-4)),
        ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-5))
    };
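    /*
     * Note (illustrative, assuming CLKNUM is the 8254 PIT input clock of
     * roughly 1.193 MHz): the measurement loop below exits with 0..5 PIT
     * ticks still pending in timerValue, so the raw TSC count actually
     * covers (SAMPLE_CLKS_INT - timerValue) PIT ticks. Multiplying by
     * scale[timerValue] and then dividing by SAMPLE_NSECS is equivalent to
     *     intermediate * SAMPLE_CLKS_EXACT / (SAMPLE_CLKS_INT - timerValue)
     * i.e. the raw count rescaled to exactly 1/20 s worth of PIT ticks;
     * SAMPLE_NSECS itself cancels out of the result.
     */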

restart:
    if (attempts >= 9) // increase to up to 9 attempts.
        // This will flash-reboot. TODO: Use tscPanic instead.
        printf("Timestamp counter calibration failed with %d attempts\n", attempts);
    attempts++;
    enable_PIT2();              // turn on PIT2
    set_PIT2(0);                // reset timer 2 to be zero
    latchTime = rdtsc64();      // get the time stamp to time
    latchTime = get_PIT2(&timerValue) - latchTime; // time how long this takes
    set_PIT2(SAMPLE_CLKS_INT);  // set up the timer for (almost) 1/20th a second
    saveTime = rdtsc64();       // now time how long a 20th a second is...
    get_PIT2(&lastValue);
    get_PIT2(&lastValue);       // read twice, first value may be unreliable
    do {
        intermediate = get_PIT2(&timerValue);
        if (timerValue > lastValue) {
            // Timer wrapped
            set_PIT2(0);
            disable_PIT2();
            goto restart;
        }
        lastValue = timerValue;
    } while (timerValue > 5);
    printf("timerValue   %d\n", timerValue);
    printf("intermediate 0x%016llx\n", intermediate);
    printf("saveTime     0x%016llx\n", saveTime);

    intermediate -= saveTime;           // raw count for about 1/20 second
    intermediate *= scale[timerValue];  // rescale measured time spent
    intermediate /= SAMPLE_NSECS;       // so it's exactly 1/20 a second
    intermediate += latchTime;          // add on our save fudge

    set_PIT2(0);                // reset timer 2 to be zero
    disable_PIT2();             // turn off PIT 2

    return intermediate;
}
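
/*
 * Note: timeRDTSC() returns the number of TSC ticks per 1/20 s, so a caller
 * derives a frequency in Hz by multiplying the result by 20; scan_cpu()
 * below does exactly that when measure_tsc_frequency() fails.
 */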

/*
 * DFE: Measures the TSC frequency in Hz (64-bit) using the 8254 PIT counter 2
 */
static uint64_t measure_tsc_frequency(void)
{
    uint64_t tscStart;
    uint64_t tscEnd;
    uint64_t tscDelta = 0xffffffffffffffffULL;
    unsigned long pollCount;
    uint64_t retval = 0;
    int i;

    /* Time how many TSC ticks elapse in 30 msec using the 8254 PIT
     * counter 2. We run this loop 10 times to make sure the cache
     * is hot and we take the minimum delta from all of the runs.
     * That is to say that we're biased towards measuring the minimum
     * number of TSC ticks that occur while waiting for the timer to
     * expire. That theoretically helps avoid inconsistencies when
     * running under a VM if the TSC is not virtualized and the host
     * steals time. The TSC is normally virtualized for VMware.
     */
    for (i = 0; i < 10; ++i)
    {
        enable_PIT2();
        set_PIT2_mode0(CALIBRATE_LATCH);
        tscStart = rdtsc64();
        pollCount = poll_PIT2_gate();
        tscEnd = rdtsc64();
        /* The poll loop must have run at least a few times for accuracy */
        if (pollCount <= 1)
            continue;
        /* The TSC must increment at LEAST once every millisecond.
         * We should have waited exactly 30 msec so the TSC delta should
         * be >= 30. Anything less and the processor is way too slow.
         */
        if ((tscEnd - tscStart) <= CALIBRATE_TIME_MSEC)
            continue;
        // tscDelta = MIN(tscDelta, (tscEnd - tscStart))
        if ((tscEnd - tscStart) < tscDelta)
            tscDelta = tscEnd - tscStart;
    }
    /* tscDelta is now the least number of TSC ticks the processor made in
     * a timespan of 0.03 s (i.e. 30 milliseconds).
     * Linux thus divides by 30 which gives the answer in kiloHertz because
     * 1 / ms = kHz. But we're xnu and most of the rest of the code uses
     * Hz so we need to convert our milliseconds to seconds. Since we're
     * dividing by the milliseconds, we simply multiply by 1000.
     */
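    /* For example (illustrative numbers only): a 2.4 GHz TSC advances by
     * about 72,000,000 ticks in 30 ms, and 72,000,000 * 1000 / 30
     * = 2,400,000,000 Hz, which is the conversion performed below.
     */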

    /* Unlike Linux, we're not limited to 32-bit, but we do need to take care
     * that we're going to multiply by 1000 first so we do need at least some
     * arithmetic headroom. For now, 32-bit should be enough.
     * Also unlike Linux, our compiler can do 64-bit integer arithmetic.
     */
    if (tscDelta > (1ULL<<32))
        retval = 0;
    else
    {
        retval = tscDelta * 1000 / 30;
    }
    disable_PIT2();
    return retval;
}

/*
 * Original comment/code:
 * "DFE: Measures the Max Performance Frequency in Hz (64-bit)"
 *
 * Measures the Actual Performance Frequency in Hz (64-bit)
 * (just a naming change, mperf --> aperf )
 */
static uint64_t measure_aperf_frequency(void)
{
    uint64_t aperfStart;
    uint64_t aperfEnd;
    uint64_t aperfDelta = 0xffffffffffffffffULL;
    unsigned long pollCount;
    uint64_t retval = 0;
    int i;

    /* Time how many APERF ticks elapse in 30 msec using the 8254 PIT
     * counter 2. We run this loop 10 times to make sure the cache
     * is hot and we take the minimum delta from all of the runs.
     * That is to say that we're biased towards measuring the minimum
     * number of APERF ticks that occur while waiting for the timer to
     * expire.
     */
    for (i = 0; i < 10; ++i)
    {
        enable_PIT2();
        set_PIT2_mode0(CALIBRATE_LATCH);
        aperfStart = rdmsr64(MSR_AMD_APERF);
        pollCount = poll_PIT2_gate();
        aperfEnd = rdmsr64(MSR_AMD_APERF);
        /* The poll loop must have run at least a few times for accuracy */
        if (pollCount <= 1)
            continue;
        /* The APERF count must increment at LEAST once every millisecond.
         * We should have waited exactly 30 msec so the APERF delta should
         * be >= 30. Anything less and the processor is way too slow.
         */
        if ((aperfEnd - aperfStart) <= CALIBRATE_TIME_MSEC)
            continue;
        // aperfDelta = MIN(aperfDelta, (aperfEnd - aperfStart))
        if ((aperfEnd - aperfStart) < aperfDelta)
            aperfDelta = aperfEnd - aperfStart;
    }
    /* aperfDelta is now the least number of APERF ticks the processor made in
     * a timespan of 0.03 s (i.e. 30 milliseconds)
     */

    if (aperfDelta > (1ULL<<32))
        retval = 0;
    else
    {
        retval = aperfDelta * 1000 / 30;
    }
    disable_PIT2();
    return retval;
}

/*
 * Calculates the FSB and CPU frequencies using specific MSRs for each CPU.
 * - The multiplier is read from a model-specific register. For Intel there is
 *   a max multiplier (used to calculate the FSB frequency) and a current
 *   multiplier (used to calculate the CPU frequency).
 * - fsbFrequency = tscFrequency / multi
 * - cpuFrequency = fsbFrequency * multi
 */
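/*
 * For example (illustrative numbers only): with a measured tscFrequency of
 * 3,200,000,000 Hz and a maximum multiplier of 32, fsbFrequency works out to
 * 100,000,000 Hz (a 100 MHz BCLK) and cpuFrequency to 32 * 100 MHz = 3.2 GHz.
 */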
void scan_cpu(PlatformInfo_t *p)
{
    uint64_t    tscFrequency, fsbFrequency, cpuFrequency;
    uint64_t    msr, flex_ratio;
    uint8_t     maxcoef, maxdiv, currcoef, bus_ratio_max, currdiv;
    const char  *newratio;
    int         len, myfsb;
    uint8_t     bus_ratio_min;
    uint32_t    max_ratio, min_ratio;

    max_ratio = min_ratio = myfsb = bus_ratio_min = 0;
    maxcoef = maxdiv = bus_ratio_max = currcoef = currdiv = 0;

    /* get cpuid values */
    do_cpuid(0x00000000, p->CPU.CPUID[CPUID_0]);
    do_cpuid(0x00000001, p->CPU.CPUID[CPUID_1]);
    do_cpuid(0x00000002, p->CPU.CPUID[CPUID_2]);
    do_cpuid(0x00000003, p->CPU.CPUID[CPUID_3]);
    do_cpuid2(0x00000004, 0, p->CPU.CPUID[CPUID_4]);
    do_cpuid(0x80000000, p->CPU.CPUID[CPUID_80]);
    if (p->CPU.CPUID[CPUID_0][0] >= 0x5) {
        do_cpuid(5, p->CPU.CPUID[CPUID_5]);
    }
    if (p->CPU.CPUID[CPUID_0][0] >= 6) {
        do_cpuid(6, p->CPU.CPUID[CPUID_6]);
    }
    if ((p->CPU.CPUID[CPUID_80][0] & 0x0000000f) >= 8) {
        do_cpuid(0x80000008, p->CPU.CPUID[CPUID_88]);
        do_cpuid(0x80000001, p->CPU.CPUID[CPUID_81]);
    }
    else if ((p->CPU.CPUID[CPUID_80][0] & 0x0000000f) >= 1) {
        do_cpuid(0x80000001, p->CPU.CPUID[CPUID_81]);
    }

#if DEBUG_CPU
    {
        int i;
        printf("CPUID Raw Values:\n");
        for (i = 0; i < CPUID_MAX; i++) {
            printf("%02d: %08x-%08x-%08x-%08x\n", i,
                   p->CPU.CPUID[i][0], p->CPU.CPUID[i][1],
                   p->CPU.CPUID[i][2], p->CPU.CPUID[i][3]);
        }
    }
#endif

    p->CPU.Vendor       = p->CPU.CPUID[CPUID_0][1];
    p->CPU.Signature    = p->CPU.CPUID[CPUID_1][0];
    p->CPU.Stepping     = bitfield(p->CPU.CPUID[CPUID_1][0], 3, 0);
    p->CPU.Model        = bitfield(p->CPU.CPUID[CPUID_1][0], 7, 4);
    p->CPU.Family       = bitfield(p->CPU.CPUID[CPUID_1][0], 11, 8);
    p->CPU.ExtModel     = bitfield(p->CPU.CPUID[CPUID_1][0], 19, 16);
    p->CPU.ExtFamily    = bitfield(p->CPU.CPUID[CPUID_1][0], 27, 20);

    p->CPU.Model += (p->CPU.ExtModel << 4);
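    /* For instance, a Sandy Bridge part reports base model 0xA with extended
     * model 0x2, so the combined value used by the model checks below is 0x2A. */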

    if (p->CPU.Vendor == CPUID_VENDOR_INTEL &&
        p->CPU.Family == 0x06 &&
        p->CPU.Model >= CPUID_MODEL_NEHALEM &&
        p->CPU.Model != CPUID_MODEL_ATOM    // MSR is *NOT* available on the Intel Atom CPU
        )
    {
        msr = rdmsr64(MSR_CORE_THREAD_COUNT);   // Undocumented MSR in Nehalem and newer CPUs
        p->CPU.NoCores   = bitfield((uint32_t)msr, 31, 16);  // Using undocumented MSR to get actual values
        p->CPU.NoThreads = bitfield((uint32_t)msr, 15, 0);   // Using undocumented MSR to get actual values
    }
    else if (p->CPU.Vendor == CPUID_VENDOR_AMD)
    {
        p->CPU.NoThreads = bitfield(p->CPU.CPUID[CPUID_1][1], 23, 16);
        p->CPU.NoCores   = bitfield(p->CPU.CPUID[CPUID_88][2], 7, 0) + 1;
    }
    else
    {
        // Use previous method for Cores and Threads
        p->CPU.NoThreads = bitfield(p->CPU.CPUID[CPUID_1][1], 23, 16);
        p->CPU.NoCores   = bitfield(p->CPU.CPUID[CPUID_4][0], 31, 26) + 1;
    }

    /* get brand string (if supported) */
    /* Copyright: from Apple's XNU cpuid.c */
    if (p->CPU.CPUID[CPUID_80][0] > 0x80000004) {
        uint32_t reg[4];
        char     str[128], *s;
        /*
         * The brand string is 48 bytes (max), guaranteed to
         * be NULL terminated.
         */
        do_cpuid(0x80000002, reg);
        bcopy((char *)reg, &str[0], 16);
        do_cpuid(0x80000003, reg);
        bcopy((char *)reg, &str[16], 16);
        do_cpuid(0x80000004, reg);
        bcopy((char *)reg, &str[32], 16);
        for (s = str; *s != '\0'; s++) {
            if (*s != ' ') break;
        }

        strlcpy(p->CPU.BrandString, s, sizeof(p->CPU.BrandString));

        if (!strncmp(p->CPU.BrandString, CPU_STRING_UNKNOWN, MIN(sizeof(p->CPU.BrandString), strlen(CPU_STRING_UNKNOWN) + 1))) {
            /*
             * This string means we have a firmware-programmable brand string,
             * and the firmware couldn't figure out what sort of CPU we have.
             */
            p->CPU.BrandString[0] = '\0';
        }
    }

    /* setup features */
    if ((bit(23) & p->CPU.CPUID[CPUID_1][3]) != 0) {
        p->CPU.Features |= CPU_FEATURE_MMX;
    }
    if ((bit(25) & p->CPU.CPUID[CPUID_1][3]) != 0) {
        p->CPU.Features |= CPU_FEATURE_SSE;
    }
    if ((bit(26) & p->CPU.CPUID[CPUID_1][3]) != 0) {
        p->CPU.Features |= CPU_FEATURE_SSE2;
    }
    if ((bit(0) & p->CPU.CPUID[CPUID_1][2]) != 0) {
        p->CPU.Features |= CPU_FEATURE_SSE3;
    }
    if ((bit(19) & p->CPU.CPUID[CPUID_1][2]) != 0) {
        p->CPU.Features |= CPU_FEATURE_SSE41;
    }
    if ((bit(20) & p->CPU.CPUID[CPUID_1][2]) != 0) {
        p->CPU.Features |= CPU_FEATURE_SSE42;
    }
    if ((bit(29) & p->CPU.CPUID[CPUID_81][3]) != 0) {
        p->CPU.Features |= CPU_FEATURE_EM64T;
    }
    if ((bit(5) & p->CPU.CPUID[CPUID_1][3]) != 0) {
        p->CPU.Features |= CPU_FEATURE_MSR;
    }
    //if ((bit(28) & p->CPU.CPUID[CPUID_1][3]) != 0) {
    if (p->CPU.NoThreads > p->CPU.NoCores) {
        p->CPU.Features |= CPU_FEATURE_HTT;
    }
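    /* Note: the commented-out check of CPUID.1:EDX bit 28 only reports whether
     * the package is HTT-capable; comparing the detected thread and core
     * counts is used instead, so the flag reflects Hyper-Threading actually
     * being in use. */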

    tscFrequency = measure_tsc_frequency();
    /* if usual method failed */
    if (tscFrequency < 1000)
    {
        tscFrequency = timeRDTSC() * 20;
    }
    fsbFrequency = 0;
    cpuFrequency = 0;

    if ((p->CPU.Vendor == CPUID_VENDOR_INTEL) && ((p->CPU.Family == 0x06) || (p->CPU.Family == 0x0f))) {
        int intelCPU = p->CPU.Model;
        if ((p->CPU.Family == 0x06 && p->CPU.Model >= 0x0c) || (p->CPU.Family == 0x0f && p->CPU.Model >= 0x03)) {
            /* Nehalem CPU model */
            if (p->CPU.Family == 0x06 && (p->CPU.Model == CPU_MODEL_NEHALEM ||
                                          p->CPU.Model == CPU_MODEL_FIELDS ||
                                          p->CPU.Model == CPU_MODEL_DALES ||
                                          p->CPU.Model == CPU_MODEL_DALES_32NM ||
                                          p->CPU.Model == CPU_MODEL_WESTMERE ||
                                          p->CPU.Model == CPU_MODEL_NEHALEM_EX ||
                                          p->CPU.Model == CPU_MODEL_WESTMERE_EX ||
                                          p->CPU.Model == CPU_MODEL_SANDYBRIDGE ||
                                          p->CPU.Model == CPU_MODEL_JAKETOWN ||
                                          p->CPU.Model == CPU_MODEL_IVYBRIDGE)) {
                msr = rdmsr64(MSR_PLATFORM_INFO);
                DBG("msr(%d): platform_info %08x\n", __LINE__, bitfield(msr, 31, 0));
                bus_ratio_max = bitfield(msr, 14, 8);
                bus_ratio_min = bitfield(msr, 46, 40); //valv: not sure about this one (Remarq.1)
                msr = rdmsr64(MSR_FLEX_RATIO);
                DBG("msr(%d): flex_ratio %08x\n", __LINE__, bitfield(msr, 31, 0));
                if (bitfield(msr, 16, 16)) {
                    flex_ratio = bitfield(msr, 14, 8);
                    /* bcc9: at least on the gigabyte h67ma-ud2h,
                       where the cpu multiplier can't be changed to
                       allow overclocking, the flex_ratio msr has unexpected (to OS X)
                       contents. These contents cause mach_kernel to
                       fail to compute the bus ratio correctly, instead
                       causing the system to crash since tscGranularity
                       is inadvertently set to 0.
                     */
                    if (flex_ratio == 0) {
                        /* Clear bit 16 (evidently the presence bit) */
                        wrmsr64(MSR_FLEX_RATIO, (msr & 0xFFFFFFFFFFFEFFFFULL));
                        msr = rdmsr64(MSR_FLEX_RATIO);
                        verbose("Unusable flex ratio detected. Patched MSR now %08x\n", bitfield(msr, 31, 0));
                    } else {
                        if (bus_ratio_max > flex_ratio) {
                            bus_ratio_max = flex_ratio;
                        }
                    }
                }

                if (bus_ratio_max) {
                    fsbFrequency = (tscFrequency / bus_ratio_max);
                }
                //valv: Turbo Ratio Limit
                if ((intelCPU != 0x2e) && (intelCPU != 0x2f)) {
                    msr = rdmsr64(MSR_TURBO_RATIO_LIMIT);
                    cpuFrequency = bus_ratio_max * fsbFrequency;
                    max_ratio = bus_ratio_max * 10;
                } else {
                    cpuFrequency = tscFrequency;
                }
                if ((getValueForKey(kbusratio, &newratio, &len, &bootInfo->chameleonConfig)) && (len <= 4)) {
                    max_ratio = atoi(newratio);
                    max_ratio = (max_ratio * 10);
                    if (len >= 3) max_ratio = (max_ratio + 5);

                    verbose("Bus-Ratio: min=%d, max=%s\n", bus_ratio_min, newratio);

                    // extreme overclockers may love 320 ;)
                    if ((max_ratio >= min_ratio) && (max_ratio <= 320)) {
                        cpuFrequency = (fsbFrequency * max_ratio) / 10;
                        if (len >= 3) maxdiv = 1;
                        else maxdiv = 0;
                    } else {
                        max_ratio = (bus_ratio_max * 10);
                    }
                }
                //valv: to be uncommented if Remarq.1 didn't stick
                /*if (bus_ratio_max > 0) bus_ratio = flex_ratio;*/
                p->CPU.MaxRatio = max_ratio;
                p->CPU.MinRatio = min_ratio;

                myfsb = fsbFrequency / 1000000;
                verbose("Sticking with [BCLK: %dMhz, Bus-Ratio: %d]\n", myfsb, max_ratio);
                currcoef = bus_ratio_max;
            } else {
                msr = rdmsr64(MSR_IA32_PERF_STATUS);
                DBG("msr(%d): ia32_perf_stat 0x%08x\n", __LINE__, bitfield(msr, 31, 0));
                currcoef = bitfield(msr, 12, 8);
                /* Non-integer bus ratio for the max-multi */
                maxdiv = bitfield(msr, 46, 46);
                /* Non-integer bus ratio for the current-multi (undocumented) */
                currdiv = bitfield(msr, 14, 14);

                // This will always be model >= 3
                if ((p->CPU.Family == 0x06 && p->CPU.Model >= 0x0e) || (p->CPU.Family == 0x0f))
                {
                    /* On these models, maxcoef defines TSC freq */
                    maxcoef = bitfield(msr, 44, 40);
                } else {
                    /* On lower models, currcoef defines TSC freq */
                    /* XXX */
                    maxcoef = currcoef;
                }

                if (maxcoef) {
                    if (maxdiv) {
                        fsbFrequency = ((tscFrequency * 2) / ((maxcoef * 2) + 1));
                    } else {
                        fsbFrequency = (tscFrequency / maxcoef);
                    }
                    if (currdiv) {
                        cpuFrequency = (fsbFrequency * ((currcoef * 2) + 1) / 2);
                    } else {
                        cpuFrequency = (fsbFrequency * currcoef);
                    }
                    DBG("max: %d%s current: %d%s\n", maxcoef, maxdiv ? ".5" : "", currcoef, currdiv ? ".5" : "");
                }
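                /* For example (illustrative numbers only): maxcoef = 11 with
                 * maxdiv set means an effective multiplier of 11.5, so the
                 * expression above gives fsbFrequency = tscFrequency * 2 / 23,
                 * i.e. tscFrequency / 11.5. */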
            }
        }
        /* Mobile CPU */
        if (rdmsr64(MSR_IA32_PLATFORM_ID) & (1<<28)) {
            p->CPU.Features |= CPU_FEATURE_MOBILE;
        }
    }
    else if ((p->CPU.Vendor == CPUID_VENDOR_AMD) && (p->CPU.Family == 0x0f))
    {
        switch (p->CPU.ExtFamily)
        {
            case 0x00: /* K8 */
                msr = rdmsr64(K8_FIDVID_STATUS);
                maxcoef  = bitfield(msr, 21, 16) / 2 + 4;
                currcoef = bitfield(msr, 5, 0) / 2 + 4;
                break;

            case 0x01: /* K10 */
                msr = rdmsr64(K10_COFVID_STATUS);
                do_cpuid2(0x00000006, 0, p->CPU.CPUID[CPUID_6]);
                // EffFreq: effective frequency interface
                if (bitfield(p->CPU.CPUID[CPUID_6][2], 0, 0) == 1)
                {
                    //uint64_t mperf = measure_mperf_frequency();
                    uint64_t aperf = measure_aperf_frequency();
                    cpuFrequency = aperf;
                }
                // NOTE: the TSC runs at the maxcoef (non-turbo) frequency,
                // *not* at the turbo frequency.
                maxcoef  = bitfield(msr, 54, 49) / 2 + 4;
                currcoef = bitfield(msr, 5, 0) + 0x10;
                currdiv  = 2 << bitfield(msr, 8, 6);

                break;

            case 0x05: /* K14 */
                msr = rdmsr64(K10_COFVID_STATUS);
                currcoef  = (bitfield(msr, 54, 49) + 0x10) << 2;
                currdiv   = (bitfield(msr, 8, 4) + 1) << 2;
                currdiv  += bitfield(msr, 3, 0);

                break;

            case 0x02: /* K11 */
                // not implemented
                break;
        }

        if (maxcoef)
        {
            if (currdiv)
            {
                if (!currcoef) currcoef = maxcoef;
                if (!cpuFrequency)
                    fsbFrequency = ((tscFrequency * currdiv) / currcoef);
                else
                    fsbFrequency = ((cpuFrequency * currdiv) / currcoef);

                DBG("%d.%d\n", currcoef / currdiv, ((currcoef % currdiv) * 100) / currdiv);
            } else {
                if (!cpuFrequency)
                    fsbFrequency = (tscFrequency / maxcoef);
                else
                    fsbFrequency = (cpuFrequency / maxcoef);
                DBG("%d\n", currcoef);
            }
        }
        else if (currcoef)
        {
            if (currdiv)
            {
                fsbFrequency = ((tscFrequency * currdiv) / currcoef);
                DBG("%d.%d\n", currcoef / currdiv, ((currcoef % currdiv) * 100) / currdiv);
            } else {
                fsbFrequency = (tscFrequency / currcoef);
                DBG("%d\n", currcoef);
            }
        }
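        /* For example (illustrative numbers only): a K8 with maxcoef = 12, no
         * divisor, and a 2.4 GHz TSC yields
         * fsbFrequency = 2,400,000,000 / 12 = 200 MHz. */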
        if (!cpuFrequency) cpuFrequency = tscFrequency;
    }

#if 0
    if (!fsbFrequency) {
        fsbFrequency = (DEFAULT_FSB * 1000);
        cpuFrequency = tscFrequency;
        DBG("0 ! using the default value for FSB !\n");
    }
#endif

    p->CPU.MaxCoef = maxcoef;
    p->CPU.MaxDiv = maxdiv;
    p->CPU.CurrCoef = currcoef;
    p->CPU.CurrDiv = currdiv;
    p->CPU.TSCFrequency = tscFrequency;
    p->CPU.FSBFrequency = fsbFrequency;
    p->CPU.CPUFrequency = cpuFrequency;

    // keep formatted with spaces instead of tabs
    DBG("CPU: Brand String:             %s\n",             p->CPU.BrandString);
    DBG("CPU: Vendor/Family/ExtFamily:  0x%x/0x%x/0x%x\n", p->CPU.Vendor, p->CPU.Family, p->CPU.ExtFamily);
    DBG("CPU: Model/ExtModel/Stepping:  0x%x/0x%x/0x%x\n", p->CPU.Model, p->CPU.ExtModel, p->CPU.Stepping);
    DBG("CPU: MaxCoef/CurrCoef:         0x%x/0x%x\n",      p->CPU.MaxCoef, p->CPU.CurrCoef);
    DBG("CPU: MaxDiv/CurrDiv:           0x%x/0x%x\n",      p->CPU.MaxDiv, p->CPU.CurrDiv);
    DBG("CPU: TSCFreq:                  %dMHz\n",          p->CPU.TSCFrequency / 1000000);
    DBG("CPU: FSBFreq:                  %dMHz\n",          p->CPU.FSBFrequency / 1000000);
    DBG("CPU: CPUFreq:                  %dMHz\n",          p->CPU.CPUFrequency / 1000000);
    DBG("CPU: NoCores/NoThreads:        %d/%d\n",          p->CPU.NoCores, p->CPU.NoThreads);
    DBG("CPU: Features:                 0x%08x\n",         p->CPU.Features);
#if DEBUG_CPU
    pause();
#endif
}
