Chameleon

Chameleon Svn Source Tree

Root/trunk/i386/libsaio/cpu.c

1/*
2 * Copyright 2008 Islam Ahmed Zaid. All rights reserved. <azismed@gmail.com>
3 * AsereBLN: 2009: cleanup and bugfix
4 */
5
6#include "libsaio.h"
7#include "platform.h"
8#include "cpu.h"
9#include "bootstruct.h"
10#include "boot.h"
11
#ifndef DEBUG_CPU
#define DEBUG_CPU 0
#endif

// DBG(): with CPU debugging enabled the message is printed to the screen,
// otherwise it is only appended to the boot log via msglog().
#if DEBUG_CPU
#define DBG(x...)	printf(x)
#else
#define DBG(x...)	msglog(x)
#endif
21
22/*
23 * timeRDTSC()
24 * This routine sets up PIT counter 2 to count down 1/20 of a second.
25 * It pauses until the value is latched in the counter
26 * and then reads the time stamp counter to return to the caller.
27 */
28uint64_t timeRDTSC(void)
29{
30intattempts = 0;
31uint64_t latchTime;
32uint64_tsaveTime,intermediate;
33unsigned int timerValue, lastValue;
34//boolean_tint_enabled;
35/*
36 * Table of correction factors to account for
37 * - timer counter quantization errors, and
38 * - undercounts 0..5
39 */
40#define SAMPLE_CLKS_EXACT(((double) CLKNUM) / 20.0)
41#define SAMPLE_CLKS_INT((int) CLKNUM / 20)
42#define SAMPLE_NSECS(2000000000LL)
43#define SAMPLE_MULTIPLIER(((double)SAMPLE_NSECS)*SAMPLE_CLKS_EXACT)
44#define ROUND64(x)((uint64_t)((x) + 0.5))
45uint64_tscale[6] = {
46ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-0)),
47ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-1)),
48ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-2)),
49ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-3)),
50ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-4)),
51ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-5))
52};
53
54restart:
55if (attempts >= 9) // increase to up to 9 attempts.
56{
57 // This will flash-reboot. TODO: Use tscPanic instead.
58printf("Timestamp counter calibation failed with %d attempts\n", attempts);
59}
60attempts++;
61enable_PIT2();// turn on PIT2
62set_PIT2(0);// reset timer 2 to be zero
63latchTime = rdtsc64();// get the time stamp to time
64latchTime = get_PIT2(&timerValue) - latchTime; // time how long this takes
65set_PIT2(SAMPLE_CLKS_INT);// set up the timer for (almost) 1/20th a second
66saveTime = rdtsc64();// now time how long a 20th a second is...
67get_PIT2(&lastValue);
68get_PIT2(&lastValue);// read twice, first value may be unreliable
69do {
70intermediate = get_PIT2(&timerValue);
71if (timerValue > lastValue)
72{
73// Timer wrapped
74set_PIT2(0);
75disable_PIT2();
76goto restart;
77}
78lastValue = timerValue;
79} while (timerValue > 5);
80printf("timerValue %d\n",timerValue);
81printf("intermediate 0x%016llx\n",intermediate);
82printf("saveTime 0x%016llx\n",saveTime);
83
84intermediate -= saveTime;// raw count for about 1/20 second
85intermediate *= scale[timerValue];// rescale measured time spent
86intermediate /= SAMPLE_NSECS;// so its exactly 1/20 a second
87intermediate += latchTime;// add on our save fudge
88
89set_PIT2(0);// reset timer 2 to be zero
90disable_PIT2();// turn off PIT 2
91
92return intermediate;
93}
94
95/*
96 * DFE: Measures the TSC frequency in Hz (64-bit) using the ACPI PM timer
97 */
98static uint64_t measure_tsc_frequency(void)
99{
100uint64_t tscStart;
101uint64_t tscEnd;
102uint64_t tscDelta = 0xffffffffffffffffULL;
103unsigned long pollCount;
104uint64_t retval = 0;
105int i;
106
107/* Time how many TSC ticks elapse in 30 msec using the 8254 PIT
108 * counter 2. We run this loop 3 times to make sure the cache
109 * is hot and we take the minimum delta from all of the runs.
110 * That is to say that we're biased towards measuring the minimum
111 * number of TSC ticks that occur while waiting for the timer to
112 * expire. That theoretically helps avoid inconsistencies when
113 * running under a VM if the TSC is not virtualized and the host
114 * steals time. The TSC is normally virtualized for VMware.
115 */
116for(i = 0; i < 10; ++i)
117{
118enable_PIT2();
119set_PIT2_mode0(CALIBRATE_LATCH);
120tscStart = rdtsc64();
121pollCount = poll_PIT2_gate();
122tscEnd = rdtsc64();
123/* The poll loop must have run at least a few times for accuracy */
124if (pollCount <= 1)
125{
126continue;
127}
128/* The TSC must increment at LEAST once every millisecond.
129 * We should have waited exactly 30 msec so the TSC delta should
130 * be >= 30. Anything less and the processor is way too slow.
131 */
132if ((tscEnd - tscStart) <= CALIBRATE_TIME_MSEC)
133{
134continue;
135}
136// tscDelta = MIN(tscDelta, (tscEnd - tscStart))
137if ( (tscEnd - tscStart) < tscDelta )
138{
139tscDelta = tscEnd - tscStart;
140}
141}
142/* tscDelta is now the least number of TSC ticks the processor made in
143 * a timespan of 0.03 s (e.g. 30 milliseconds)
144 * Linux thus divides by 30 which gives the answer in kiloHertz because
145 * 1 / ms = kHz. But we're xnu and most of the rest of the code uses
146 * Hz so we need to convert our milliseconds to seconds. Since we're
147 * dividing by the milliseconds, we simply multiply by 1000.
148 */
149
150/* Unlike linux, we're not limited to 32-bit, but we do need to take care
151 * that we're going to multiply by 1000 first so we do need at least some
152 * arithmetic headroom. For now, 32-bit should be enough.
153 * Also unlike Linux, our compiler can do 64-bit integer arithmetic.
154 */
155if (tscDelta > (1ULL<<32))
156{
157retval = 0;
158}
159else
160{
161retval = tscDelta * 1000 / 30;
162}
163disable_PIT2();
164return retval;
165}
166
/*
 * Original comment/code:
 * "DFE: Measures the Max Performance Frequency in Hz (64-bit)"
 *
 * Measures the Actual Performance Frequency in Hz (64-bit)
 * (just a naming change, mperf --> aperf )
 */
static uint64_t measure_aperf_frequency(void)
{
	uint64_t aperfStart;
	uint64_t aperfEnd;
	uint64_t aperfDelta = 0xffffffffffffffffULL;	/* smallest delta observed */
	unsigned long pollCount;
	uint64_t retval = 0;
	int i;

	/* Time how many APERF ticks elapse in 30 msec using the 8254 PIT
	 * counter 2. We run this loop 10 times to make sure the cache
	 * is hot and we take the minimum delta from all of the runs.
	 * That is to say that we're biased towards measuring the minimum
	 * number of APERF ticks that occur while waiting for the timer to
	 * expire.
	 */
	for(i = 0; i < 10; ++i)
	{
		enable_PIT2();
		set_PIT2_mode0(CALIBRATE_LATCH);
		aperfStart = rdmsr64(MSR_AMD_APERF);
		pollCount = poll_PIT2_gate();
		aperfEnd = rdmsr64(MSR_AMD_APERF);
		/* The poll loop must have run at least a few times for accuracy */
		if (pollCount <= 1)
		{
			continue;
		}
		/* The APERF counter must increment at LEAST once every millisecond.
		 * We should have waited exactly 30 msec so the APERF delta should
		 * be >= 30. Anything less and the processor is way too slow.
		 */
		if ((aperfEnd - aperfStart) <= CALIBRATE_TIME_MSEC)
		{
			continue;
		}
		// aperfDelta = MIN(aperfDelta, (aperfEnd - aperfStart))
		if ( (aperfEnd - aperfStart) < aperfDelta )
		{
			aperfDelta = aperfEnd - aperfStart;
		}
	}
	/* aperfDelta is now the least number of APERF ticks the processor made in
	 * a timespan of 0.03 s (e.g. 30 milliseconds)
	 */

	// A delta above 2^32 means no run qualified (or the reading is
	// nonsense): report 0. Otherwise scale ticks-per-30ms to Hz.
	if (aperfDelta > (1ULL<<32))
	{
		retval = 0;
	}
	else
	{
		retval = aperfDelta * 1000 / 30;
	}
	disable_PIT2();
	return retval;
}
231
/*
 * Calculates the FSB and CPU frequencies using specific MSRs for each CPU
 * - multi. is read from a specific MSR. In the case of Intel, there is:
 *	   a max multi. (used to calculate the FSB freq.),
 *	   and a current multi. (used to calculate the CPU freq.)
 * - fsbFrequency = tscFrequency / multi
 * - cpuFrequency = fsbFrequency * multi
 *
 * Fills in p->CPU: CPUID leaves, vendor/family/model, core/thread counts,
 * brand string, feature flags, ratios and TSC/FSB/CPU frequencies.
 */
void scan_cpu(PlatformInfo_t *p)
{
	uint64_t	tscFrequency, fsbFrequency, cpuFrequency;
	uint64_t	msr, flex_ratio;
	uint8_t		maxcoef, maxdiv, currcoef, bus_ratio_max, currdiv;
	const char	*newratio;
	int		len, myfsb;
	uint8_t		bus_ratio_min;
	uint32_t	max_ratio, min_ratio;

	max_ratio = min_ratio = myfsb = bus_ratio_min = 0;
	maxcoef = maxdiv = bus_ratio_max = currcoef = currdiv = 0;

	/* get cpuid values */
	do_cpuid(0x00000000, p->CPU.CPUID[CPUID_0]);
	do_cpuid(0x00000001, p->CPU.CPUID[CPUID_1]);
	do_cpuid(0x00000002, p->CPU.CPUID[CPUID_2]);
	do_cpuid(0x00000003, p->CPU.CPUID[CPUID_3]);
	do_cpuid2(0x00000004, 0, p->CPU.CPUID[CPUID_4]);
	do_cpuid(0x80000000, p->CPU.CPUID[CPUID_80]);
	// Leaves 5 and 6 only exist when leaf 0 reports a high enough
	// maximum basic leaf.
	if (p->CPU.CPUID[CPUID_0][0] >= 0x5)
	{
		do_cpuid(5, p->CPU.CPUID[CPUID_5]);
	}
	if (p->CPU.CPUID[CPUID_0][0] >= 6)
	{
		do_cpuid(6, p->CPU.CPUID[CPUID_6]);
	}
	// Same gating for the extended leaves 0x80000001/0x80000008.
	if ((p->CPU.CPUID[CPUID_80][0] & 0x0000000f) >= 8)
	{
		do_cpuid(0x80000008, p->CPU.CPUID[CPUID_88]);
		do_cpuid(0x80000001, p->CPU.CPUID[CPUID_81]);
	}
	else if ((p->CPU.CPUID[CPUID_80][0] & 0x0000000f) >= 1)
	{
		do_cpuid(0x80000001, p->CPU.CPUID[CPUID_81]);
	}

#if DEBUG_CPU
	{
		int	i;
		printf("CPUID Raw Values:\n");
		for (i=0; i<CPUID_MAX; i++)
		{
			printf("%02d: %08x-%08x-%08x-%08x\n", i,
				p->CPU.CPUID[i][0], p->CPU.CPUID[i][1],
				p->CPU.CPUID[i][2], p->CPU.CPUID[i][3]);
		}
	}
#endif

	// Decode the version information from CPUID leaf 1, register EAX.
	p->CPU.Vendor		= p->CPU.CPUID[CPUID_0][1];
	p->CPU.Signature	= p->CPU.CPUID[CPUID_1][0];
	p->CPU.Stepping		= bitfield(p->CPU.CPUID[CPUID_1][0], 3, 0);
	p->CPU.Model		= bitfield(p->CPU.CPUID[CPUID_1][0], 7, 4);
	p->CPU.Family		= bitfield(p->CPU.CPUID[CPUID_1][0], 11, 8);
	p->CPU.ExtModel		= bitfield(p->CPU.CPUID[CPUID_1][0], 19, 16);
	p->CPU.ExtFamily	= bitfield(p->CPU.CPUID[CPUID_1][0], 27, 20);

	// Effective model number = (extended model << 4) + model.
	p->CPU.Model += (p->CPU.ExtModel << 4);

	if (p->CPU.Vendor == CPUID_VENDOR_INTEL &&
		p->CPU.Family == 0x06 &&
		p->CPU.Model >= CPU_MODEL_NEHALEM &&
		p->CPU.Model != CPU_MODEL_ATOM		// MSR is *NOT* available on the Intel Atom CPU
		)
	{
		msr = rdmsr64(MSR_CORE_THREAD_COUNT);	// Undocumented MSR in Nehalem and newer CPUs
		p->CPU.NoCores		= bitfield((uint32_t)msr, 31, 16);	// Using undocumented MSR to get actual values
		p->CPU.NoThreads	= bitfield((uint32_t)msr, 15, 0);	// Using undocumented MSR to get actual values
	}
	else if (p->CPU.Vendor == CPUID_VENDOR_AMD)
	{
		// AMD: logical count from leaf 1 EBX, core count from leaf
		// 0x80000008 ECX.
		p->CPU.NoThreads	= bitfield(p->CPU.CPUID[CPUID_1][1], 23, 16);
		p->CPU.NoCores		= bitfield(p->CPU.CPUID[CPUID_88][2], 7, 0) + 1;
	}
	else
	{
		// Use previous method for Cores and Threads
		p->CPU.NoThreads	= bitfield(p->CPU.CPUID[CPUID_1][1], 23, 16);
		p->CPU.NoCores		= bitfield(p->CPU.CPUID[CPUID_4][0], 31, 26) + 1;
	}

	/* get brand string (if supported) */
	/* Copyright: from Apple's XNU cpuid.c */
	if (p->CPU.CPUID[CPUID_80][0] > 0x80000004)
	{
		uint32_t	reg[4];
		char		str[128], *s;
		/*
		 * The brand string 48 bytes (max), guaranteed to
		 * be NULL terminated.
		 */
		do_cpuid(0x80000002, reg);
		bcopy((char *)reg, &str[0], 16);
		do_cpuid(0x80000003, reg);
		bcopy((char *)reg, &str[16], 16);
		do_cpuid(0x80000004, reg);
		bcopy((char *)reg, &str[32], 16);
		// Skip any leading spaces the firmware padded the string with.
		for (s = str; *s != '\0'; s++)
		{
			if (*s != ' ')
			{
				break;
			}
		}

		strlcpy(p->CPU.BrandString, s, sizeof(p->CPU.BrandString));

		if (!strncmp(p->CPU.BrandString, CPU_STRING_UNKNOWN, MIN(sizeof(p->CPU.BrandString), strlen(CPU_STRING_UNKNOWN) + 1)))
		{
			/*
			 * This string means we have a firmware-programmable brand string,
			 * and the firmware couldn't figure out what sort of CPU we have.
			 */
			p->CPU.BrandString[0] = '\0';
		}
	}

	/* setup features: translate CPUID leaf 1 EDX/ECX (and extended leaf
	 * 0x80000001 EDX) bits into the platform feature mask. */
	if ((bit(23) & p->CPU.CPUID[CPUID_1][3]) != 0)
	{
		p->CPU.Features |= CPU_FEATURE_MMX;
	}
	if ((bit(25) & p->CPU.CPUID[CPUID_1][3]) != 0)
	{
		p->CPU.Features |= CPU_FEATURE_SSE;
	}
	if ((bit(26) & p->CPU.CPUID[CPUID_1][3]) != 0)
	{
		p->CPU.Features |= CPU_FEATURE_SSE2;
	}
	if ((bit(0) & p->CPU.CPUID[CPUID_1][2]) != 0)
	{
		p->CPU.Features |= CPU_FEATURE_SSE3;
	}
	if ((bit(19) & p->CPU.CPUID[CPUID_1][2]) != 0)
	{
		p->CPU.Features |= CPU_FEATURE_SSE41;
	}
	if ((bit(20) & p->CPU.CPUID[CPUID_1][2]) != 0)
	{
		p->CPU.Features |= CPU_FEATURE_SSE42;
	}
	if ((bit(29) & p->CPU.CPUID[CPUID_81][3]) != 0)
	{
		p->CPU.Features |= CPU_FEATURE_EM64T;
	}
	if ((bit(5) & p->CPU.CPUID[CPUID_1][3]) != 0)
	{
		p->CPU.Features |= CPU_FEATURE_MSR;
	}
	// HTT is inferred from thread count exceeding core count rather than
	// the raw CPUID HTT bit (which is set on some non-HT parts).
	//if ((bit(28) & p->CPU.CPUID[CPUID_1][3]) != 0) {
	if (p->CPU.NoThreads > p->CPU.NoCores)
	{
		p->CPU.Features |= CPU_FEATURE_HTT;
	}

	tscFrequency = measure_tsc_frequency();
	/* if usual method failed */
	if ( tscFrequency < 1000 )
	{
		// timeRDTSC() returns ticks per 1/20 s, hence * 20 for ticks/s.
		tscFrequency = timeRDTSC() * 20;
	}
	fsbFrequency = 0;
	cpuFrequency = 0;

	if ((p->CPU.Vendor == CPUID_VENDOR_INTEL) && ((p->CPU.Family == 0x06) || (p->CPU.Family == 0x0f)))
	{
		int intelCPU = p->CPU.Model;
		if ((p->CPU.Family == 0x06 && p->CPU.Model >= 0x0c) || (p->CPU.Family == 0x0f && p->CPU.Model >= 0x03))
		{
			/* Nehalem CPU model */
			if (p->CPU.Family == 0x06 && (p->CPU.Model == CPU_MODEL_NEHALEM ||
				p->CPU.Model == CPU_MODEL_FIELDS ||
				p->CPU.Model == CPU_MODEL_DALES ||
				p->CPU.Model == CPU_MODEL_DALES_32NM ||
				p->CPU.Model == CPU_MODEL_WESTMERE ||
				p->CPU.Model == CPU_MODEL_NEHALEM_EX ||
				p->CPU.Model == CPU_MODEL_WESTMERE_EX ||
				p->CPU.Model == CPU_MODEL_SANDYBRIDGE ||
				p->CPU.Model == CPU_MODEL_JAKETOWN ||
				p->CPU.Model == CPU_MODEL_IVYBRIDGE_XEON ||
				p->CPU.Model == CPU_MODEL_IVYBRIDGE ||
				p->CPU.Model == CPU_MODEL_HASWELL ||
				p->CPU.Model == CPU_MODEL_HASWELL_MB ||
				//p->CPU.Model == CPU_MODEL_HASWELL_H ||
				p->CPU.Model == CPU_MODEL_HASWELL_ULT ||
				p->CPU.Model == CPU_MODEL_HASWELL_ULX ))
			{
				// Nehalem and newer: ratios come from MSR_PLATFORM_INFO.
				msr = rdmsr64(MSR_PLATFORM_INFO);
				DBG("msr(%d): platform_info %08x\n", __LINE__, bitfield(msr, 31, 0));
				bus_ratio_max = bitfield(msr, 15, 8);
				bus_ratio_min = bitfield(msr, 47, 40); //valv: not sure about this one (Remarq.1)
				msr = rdmsr64(MSR_FLEX_RATIO);
				DBG("msr(%d): flex_ratio %08x\n", __LINE__, bitfield(msr, 31, 0));
				// Bit 16 = flex-ratio enable/presence.
				if (bitfield(msr, 16, 16))
				{
					flex_ratio = bitfield(msr, 15, 8);
					/* bcc9: at least on the gigabyte h67ma-ud2h,
					   where the cpu multipler can't be changed to
					   allow overclocking, the flex_ratio msr has unexpected (to OSX)
					   contents.	These contents cause mach_kernel to
					   fail to compute the bus ratio correctly, instead
					   causing the system to crash since tscGranularity
					   is inadvertently set to 0.
					*/
					if (flex_ratio == 0)
					{
						/* Clear bit 16 (evidently the presence bit) */
						wrmsr64(MSR_FLEX_RATIO, (msr & 0xFFFFFFFFFFFEFFFFULL));
						msr = rdmsr64(MSR_FLEX_RATIO);
						verbose("Unusable flex ratio detected. Patched MSR now %08x\n", bitfield(msr, 31, 0));
					}
					else
					{
						if (bus_ratio_max > flex_ratio)
						{
							bus_ratio_max = flex_ratio;
						}
					}
				}

				if (bus_ratio_max)
				{
					fsbFrequency = (tscFrequency / bus_ratio_max);
				}
				//valv: Turbo Ratio Limit
				if ((intelCPU != 0x2e) && (intelCPU != 0x2f))
				{
					// NOTE(review): the turbo-ratio MSR value read here is
					// never used; cpuFrequency uses the max bus ratio.
					msr = rdmsr64(MSR_TURBO_RATIO_LIMIT);
					cpuFrequency = bus_ratio_max * fsbFrequency;
					max_ratio = bus_ratio_max * 10;
				}
				else
				{
					cpuFrequency = tscFrequency;
				}
				// Optional user override of the bus ratio from the boot
				// config (e.g. "84" or "845" meaning 84.5).
				if ((getValueForKey(kbusratio, &newratio, &len, &bootInfo->chameleonConfig)) && (len <= 4))
				{
					max_ratio = atoi(newratio);
					max_ratio = (max_ratio * 10);
					if (len >= 3)
					{
						// Third digit encodes a half step (+0.5).
						max_ratio = (max_ratio + 5);
					}

					verbose("Bus-Ratio: min=%d, max=%s\n", bus_ratio_min, newratio);

					// extreme overclockers may love 320 ;)
					if ((max_ratio >= min_ratio) && (max_ratio <= 320))
					{
						cpuFrequency = (fsbFrequency * max_ratio) / 10;
						if (len >= 3)
						{
							maxdiv = 1;
						}
						else
						{
							maxdiv = 0;
						}
					}
					else
					{
						max_ratio = (bus_ratio_max * 10);
					}
				}
				//valv: to be uncommented if Remarq.1 didn't stick
				/*if (bus_ratio_max > 0) bus_ratio = flex_ratio;*/
				p->CPU.MaxRatio = max_ratio;
				p->CPU.MinRatio = min_ratio;

				myfsb = fsbFrequency / 1000000;
				verbose("Sticking with [BCLK: %dMhz, Bus-Ratio: %d]\n", myfsb, max_ratio);
				currcoef = bus_ratio_max;
			}
			else
			{
				// Pre-Nehalem: ratios come from IA32_PERF_STATUS.
				msr = rdmsr64(MSR_IA32_PERF_STATUS);
				DBG("msr(%d): ia32_perf_stat 0x%08x\n", __LINE__, bitfield(msr, 31, 0));
				currcoef = bitfield(msr, 15, 8);
				/* Non-integer bus ratio for the max-multi*/
				maxdiv = bitfield(msr, 46, 46);
				/* Non-integer bus ratio for the current-multi (undocumented)*/
				currdiv = bitfield(msr, 14, 14);

				// This will always be model >= 3
				if ((p->CPU.Family == 0x06 && p->CPU.Model >= 0x0e) || (p->CPU.Family == 0x0f))
				{
					/* On these models, maxcoef defines TSC freq */
					maxcoef = bitfield(msr, 44, 40);
				}
				else
				{
					/* On lower models, currcoef defines TSC freq */
					/* XXX */
					maxcoef = currcoef;
				}

				if (maxcoef)
				{
					if (maxdiv)
					{
						// Half-step multiplier: effective ratio is maxcoef + 0.5.
						fsbFrequency = ((tscFrequency * 2) / ((maxcoef * 2) + 1));
					}
					else
					{
						fsbFrequency = (tscFrequency / maxcoef);
					}
					if (currdiv)
					{
						cpuFrequency = (fsbFrequency * ((currcoef * 2) + 1) / 2);
					}
					else
					{
						cpuFrequency = (fsbFrequency * currcoef);
					}
					DBG("max: %d%s current: %d%s\n", maxcoef, maxdiv ? ".5" : "",currcoef, currdiv ? ".5" : "");
				}
			}
		}
		/* Mobile CPU */
		if (rdmsr64(MSR_IA32_PLATFORM_ID) & (1<<28))
		{
			p->CPU.Features |= CPU_FEATURE_MOBILE;
		}
	}
	else if ((p->CPU.Vendor == CPUID_VENDOR_AMD) && (p->CPU.Family == 0x0f))
	{
		switch(p->CPU.ExtFamily)
		{
			case 0x00: /* K8 */
				msr = rdmsr64(K8_FIDVID_STATUS);
				maxcoef = bitfield(msr, 21, 16) / 2 + 4;
				currcoef = bitfield(msr, 5, 0) / 2 + 4;
				break;

			case 0x01: /* K10 */
				msr = rdmsr64(K10_COFVID_STATUS);
				do_cpuid2(0x00000006, 0, p->CPU.CPUID[CPUID_6]);
				// EffFreq: effective frequency interface
				if (bitfield(p->CPU.CPUID[CPUID_6][2], 0, 0) == 1)
				{
					//uint64_t mperf = measure_mperf_frequency();
					uint64_t aperf = measure_aperf_frequency();
					cpuFrequency = aperf;
				}
				// NOTE: tsc runs at the maccoeff (non turbo)
				//	 *not* at the turbo frequency.
				maxcoef = bitfield(msr, 54, 49) / 2 + 4;
				currcoef = bitfield(msr, 5, 0) + 0x10;
				currdiv = 2 << bitfield(msr, 8, 6);

				break;

			case 0x05: /* K14 */
				msr = rdmsr64(K10_COFVID_STATUS);
				currcoef = (bitfield(msr, 54, 49) + 0x10) << 2;
				currdiv = (bitfield(msr, 8, 4) + 1) << 2;
				currdiv += bitfield(msr, 3, 0);

				break;

			case 0x02: /* K11 */
				// not implimented
				break;
		}

		// Derive the FSB from whichever coefficient was populated above.
		if (maxcoef)
		{
			if (currdiv)
			{
				if (!currcoef)
				{
					currcoef = maxcoef;
				}

				if (!cpuFrequency)
				{
					fsbFrequency = ((tscFrequency * currdiv) / currcoef);
				}
				else
				{
					fsbFrequency = ((cpuFrequency * currdiv) / currcoef);
				}
				DBG("%d.%d\n", currcoef / currdiv, ((currcoef % currdiv) * 100) / currdiv);
			}
			else
			{
				if (!cpuFrequency)
				{
					fsbFrequency = (tscFrequency / maxcoef);
				}
				else
				{
					fsbFrequency = (cpuFrequency / maxcoef);
				}
				DBG("%d\n", currcoef);
			}
		}
		else if (currcoef)
		{
			if (currdiv)
			{
				fsbFrequency = ((tscFrequency * currdiv) / currcoef);
				DBG("%d.%d\n", currcoef / currdiv, ((currcoef % currdiv) * 100) / currdiv);
			}
			else
			{
				fsbFrequency = (tscFrequency / currcoef);
				DBG("%d\n", currcoef);
			}
		}
		if (!cpuFrequency) cpuFrequency = tscFrequency;
	}

#if 0
	if (!fsbFrequency)
	{
		fsbFrequency = (DEFAULT_FSB * 1000);
		cpuFrequency = tscFrequency;
		DBG("0 ! using the default value for FSB !\n");
	}
#endif

	// Publish the results to the platform structure.
	p->CPU.MaxCoef = maxcoef;
	p->CPU.MaxDiv = maxdiv;
	p->CPU.CurrCoef = currcoef;
	p->CPU.CurrDiv = currdiv;
	p->CPU.TSCFrequency = tscFrequency;
	p->CPU.FSBFrequency = fsbFrequency;
	p->CPU.CPUFrequency = cpuFrequency;

	// keep formatted with spaces instead of tabs
	// NOTE(review): the %d specifiers below are fed 64-bit frequency
	// values — confirm against this project's printf implementation.
	DBG("CPU: Brand String: %s\n", p->CPU.BrandString);
	DBG("CPU: Vendor/Family/ExtFamily: 0x%x/0x%x/0x%x\n", p->CPU.Vendor, p->CPU.Family, p->CPU.ExtFamily);
	DBG("CPU: Model/ExtModel/Stepping: 0x%x/0x%x/0x%x\n", p->CPU.Model, p->CPU.ExtModel, p->CPU.Stepping);
	DBG("CPU: MaxCoef/CurrCoef: 0x%x/0x%x\n", p->CPU.MaxCoef, p->CPU.CurrCoef);
	DBG("CPU: MaxDiv/CurrDiv: 0x%x/0x%x\n", p->CPU.MaxDiv, p->CPU.CurrDiv);
	DBG("CPU: TSCFreq: %dMHz\n", p->CPU.TSCFrequency / 1000000);
	DBG("CPU: FSBFreq: %dMHz\n", p->CPU.FSBFrequency / 1000000);
	DBG("CPU: CPUFreq: %dMHz\n", p->CPU.CPUFrequency / 1000000);
	DBG("CPU: NoCores/NoThreads: %d/%d\n", p->CPU.NoCores, p->CPU.NoThreads);
	DBG("CPU: Features: 0x%08x\n", p->CPU.Features);
#if DEBUG_CPU
	pause();
#endif
}
688

Archive Download this file

Revision: 2266